Compare commits

..

377 Commits

Author SHA1 Message Date
Julien Fontanet
5bd468791f feat(xo-server-backup-reports): 0.12.0 2018-05-31 18:28:21 +02:00
Julien Fontanet
37f71bb36c feat(xo-acl-resolver): 0.2.4 2018-05-31 18:26:53 +02:00
Julien Fontanet
2ed4b7ad3f feat(vhd-lib): 0.1.1 2018-05-31 17:59:02 +02:00
Julien Fontanet
7eb970f22a feat(fs): 0.0.1 2018-05-31 17:57:13 +02:00
badrAZ
13db4a8411 feat(Backup NG): improve logs (#3013) 2018-05-31 17:54:35 +02:00
badrAZ
49a7a89bbf feat(xo-web/new-vm): ability to use template vars in the CloudConfig (#3006)
Fixes #2140
2018-05-31 17:44:52 +02:00
Rajaa.BARHTAOUI
0af8a60c1c fix(xo-web/SR/disks): show VM templates attached to VDIs (#3012)
Fixes #2974
2018-05-31 17:32:34 +02:00
Rajaa.BARHTAOUI
e1650b376c fix(xo-web/Self new VM): do not auto-select resource set's 1st SR (#3007)
New behaviour: either auto-select the template's pool's default SR if
it's in the resource set or do not auto-select any SR at all

Fixes #3001
2018-05-31 16:57:52 +02:00
Rajaa.BARHTAOUI
873b40cc70 feat(xo-server,xo-web): allow setting remote syslog host (#2958)
Fixes #2900
2018-05-31 16:22:39 +02:00
Rajaa.BARHTAOUI
d45265b180 feat(xo-web): create single-server private network (#3004)
Fixes #2944
2018-05-31 11:25:08 +02:00
badrAZ
ff50b2848e feat(xo-server,xo-web): allow pool admins to create VMs (#2995)
Fixes #2350
2018-05-31 10:42:03 +02:00
Julien Fontanet
d67fae22ab chore: initial PR template 2018-05-30 18:10:11 +02:00
badrAZ
d809002558 feat(Backup NG): ability to retry a single failed VM backup (#3009)
Fixes #2912
2018-05-30 17:04:50 +02:00
Pierre Donias
5c30559d15 feat(xo-server,xo-web): handle XCP-ng patches (#3005) 2018-05-30 16:03:43 +02:00
Rajaa.BARHTAOUI
cbb5b011e1 fix(xo-web/SR/disks): show control domain VMs attached to VDIs (#3002)
Fixes #2999
2018-05-30 15:55:44 +02:00
Julien Fontanet
f5bff408a8 feat(xo-server/exportDeltaVm): dont check chain if no snapshot
Should fix a race condition with XenServer DB during Delta Backup NG.
2018-05-30 12:22:10 +02:00
Nicolas Raynaud
d7cfe4d3dc fix(vhd/merge): fix performance enhancement (#2980) 2018-05-30 10:26:15 +02:00
badrAZ
7be8f38c6b fix(xo-web/jobs/edit): support all types of inputs (#2997) 2018-05-30 10:20:22 +02:00
badrAZ
08a7e605ce feat(xo-web/backup logs): show # of calls for each state (#2860) 2018-05-30 09:58:48 +02:00
Julien Fontanet
4b57db5893 feat(xo-server/delta NG): validate parent VHD 2018-05-29 16:36:33 +02:00
Julien Fontanet
8b1ae3f3c9 fix(xo-server/delta NG): dont select tmp file as parent 2018-05-29 16:36:10 +02:00
Julien Fontanet
77d35a5928 chore(fs/Handler#list): prepend dir after filtering 2018-05-29 16:34:51 +02:00
Julien Fontanet
323d409e6c chore(package): require yarn >1.7
Previous releases prevented xo-web from being built due to an incorrect resolution of dependencies.
2018-05-29 16:29:14 +02:00
Pierre Donias
9f2f2b7b69 fix(xo-web/render-xo-item): show SR container name (#3003)
Fixes #3000
2018-05-29 12:01:50 +02:00
Julien Fontanet
b44fa7beca chore(xo-server): bump xo:perf threshold to 500ms 2018-05-29 10:53:14 +02:00
Julien Fontanet
6d4e310b8e chore(vhd-cli/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
6726530229 fix(tests): run all test with Babel 7
Temporarily disable xo-web tests which uses Babel 6.
2018-05-28 18:18:44 +02:00
Julien Fontanet
8351352541 chore(xo-remote-parser/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
3f9e8d79ea chore(xo-collection/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
685f2328bd chore(package): update dependencies 2018-05-28 14:54:55 +02:00
Pierre Donias
746567a8a7 fix(xo-acl-resolver,xo-web): SR & PIF ACLs inheritance (#2994) 2018-05-25 15:26:45 +02:00
Julien Fontanet
c116c41c42 chore(package): update dependencies 2018-05-25 11:42:55 +02:00
Pierre Donias
3768a7de37 fix(xo-web/select): do not auto-select disabled option (#2992) 2018-05-25 11:11:21 +02:00
Julien Fontanet
11ef0ee54f feat(rolling snapshots legacy): check VDI chains (#2986) 2018-05-24 16:39:06 +02:00
Julien Fontanet
33ae531e3a fix(package): update hashy to 0.7.1
Correctly handle hash with identifier `2b`.
2018-05-24 14:52:38 +02:00
Julien Fontanet
8cc9924751 feat(xo-server/importDeltaVm): add UUID of missing base VM 2018-05-24 00:04:52 +02:00
Julien Fontanet
c329ab863b feat(Backup NG): configurable concurrency (#2918) 2018-05-23 16:29:31 +02:00
Julien Fontanet
41820ea316 fix(xo-server/backup legacy): fix reports (#2979) 2018-05-23 15:56:40 +02:00
badrAZ
bf00f80716 fix(xo-web): update @julien-f/freactal to 0.1.1 (#2972)
This fix prevents the cursor from jumping to the end of the input when editing the backup's name.
2018-05-23 14:47:28 +02:00
Julien Fontanet
9baf0c74e4 chore(vhd-lib): move some deps to devDeps 2018-05-23 11:49:59 +02:00
Julien Fontanet
b59ccdf26f chore(package): update dependencies 2018-05-23 11:23:25 +02:00
Julien Fontanet
9cae978923 feat(xo-server): 5.19.9 2018-05-22 19:43:19 +02:00
Julien Fontanet
311d914b96 feat(xo-web/import): remove restriction for Free 2018-05-22 16:27:17 +02:00
Julien Fontanet
592cb4ef9e feat(xo-web): 5.19.8 2018-05-22 15:53:45 +02:00
Julien Fontanet
ec2db7f2d0 feat(xo-vmdk-to-vhd): 0.1.2 2018-05-22 15:53:03 +02:00
Julien Fontanet
71eab7ba9b feat(xo-web): 5.19.7 2018-05-22 15:36:54 +02:00
badrAZ
5e07171d60 fix(xo-server/backup-ng): fix incorrect condition (#2971) 2018-05-22 11:45:02 +02:00
Rajaa.BARHTAOUI
3f73e3d964 fix(xo-server,xo-web): message when no Xen tools (#2916)
Fixes #2911
2018-05-22 09:57:46 +02:00
badrAZ
0ebe78b4a2 feat(xo-server-usage-report): improve the report (#2970)
Fixes #2968
2018-05-21 16:25:40 +02:00
Julien Fontanet
61c3379298 feat(xo-server): 5.19.8 2018-05-21 11:00:19 +02:00
Julien Fontanet
44866f3316 feat(xo-web): 5.19.6 2018-05-21 10:42:37 +02:00
Julien Fontanet
4bb8ce8779 feat(vhd-lib): 0.1.0 2018-05-21 10:03:37 +02:00
Julien Fontanet
58eb6a8b5f feat(xo-server): 5.19.7 2018-05-18 18:44:34 +02:00
Julien Fontanet
52f6a79e01 fix(xo-server/backupNg/logs): include merge/transfer size (#2965) 2018-05-18 18:44:07 +02:00
Julien Fontanet
129f79d44b feat(xo-web): 5.19.5 2018-05-18 18:42:31 +02:00
Julien Fontanet
385c3eb563 feat(xo-vmdk-to-vhd): 0.1.1 2018-05-18 18:40:26 +02:00
Julien Fontanet
e56be51b45 chore(xo-server/backups-ng): remove incorrect TODO 2018-05-18 17:14:50 +02:00
Olivier Lambert
24ae65b254 fix(xo-server/sr.createNfs): nfsVersion → nfsOptions (#2904) 2018-05-18 16:28:02 +02:00
badrAZ
d5dffbacbd fix(xo-web/FormattedDuration): handle duration < 0 seconds (#2964) 2018-05-18 15:06:23 +02:00
Julien Fontanet
c6ae969a82 fix(xo-server/https): ask for passphrase (#2963)
Fixes #2962
2018-05-18 15:05:49 +02:00
Nicolas Raynaud
005a9fdc01 fix(xo-vmdk-to-vhd): various bugs (#2961) 2018-05-18 14:02:19 +02:00
Jerome Charaoui
f505d4d911 Fix SR creation when using options or NFSv4 (#2960) 2018-05-17 22:12:09 +02:00
badrAZ
8ada6b121e fix(backup-ng/logs): handle the case when transfer duration equals 0 (#2954) 2018-05-17 16:58:29 +02:00
Julien Fontanet
b9a87efb0d fix(xo-server/backupNg): dont fail on corrupted VHDs (#2957)
Corrupted VHD files (usually uncleaned temporary) could fail the job.
2018-05-17 11:27:02 +02:00
Pierre Donias
89485a82d2 feat(xo-web): make many objects' UUID copiable (#2955)
Fixes #2925

- host/tab-network
- pool/tab-network
- vm/tab-disks
- vm/tab-network
- vm/tab-snapshots
2018-05-16 17:39:47 +02:00
Pierre Donias
451f87c6b4 feat(xo-web/servers): allow unauthorized cert. when adding server (#2953)
Fixes #2926
2018-05-16 13:44:27 +02:00
Rajaa.BARHTAOUI
c3cb5a3221 feat(xo-server,xo-web): VM HA options (#2946)
Fixes #2917
2018-05-16 13:27:40 +02:00
Julien Fontanet
458609ed2e feat(xo-server): 5.19.6 2018-05-16 10:32:59 +02:00
Julien Fontanet
fcec8113f3 fix(xo-server/backupNg): await writeStream (#2951) 2018-05-16 10:32:38 +02:00
Julien Fontanet
ebbd882ee4 feat(xo-web): 5.19.4 2018-05-15 17:44:25 +02:00
Julien Fontanet
0506e19a66 chore(xo-server/backups-ng): update todo list 2018-05-15 17:44:09 +02:00
Pierre Donias
ecc62e4f54 fix(xo-web/xosan): install packs button condition (#2950) 2018-05-15 17:40:40 +02:00
Julien Fontanet
2b95eb4e4d feat(xo-web): 5.19.3 2018-05-15 16:11:53 +02:00
Julien Fontanet
bcde9e0f74 feat(xo-server): 5.19.5 2018-05-15 16:11:34 +02:00
Pierre Donias
114501ebc7 feat(XOSAN): allow user to update packs (#2782) 2018-05-15 16:11:04 +02:00
badrAZ
ebab7c0867 fix(backup-ng/logs): handle the case when transfer/merge duration equals 0 (#2949) 2018-05-15 16:10:17 +02:00
Julien Fontanet
0e2270fb6e feat(xo-web): 5.19.2 2018-05-15 14:46:33 +02:00
Julien Fontanet
593493ec0c feat(xo-server): 5.19.4 2018-05-15 14:46:07 +02:00
Julien Fontanet
d92898a806 feat(xo-vmdk-to-vhd): 0.1.0 2018-05-15 14:45:19 +02:00
Julien Fontanet
7890e46551 feat(xo-server-backup-reports): 0.11.0 2018-05-15 14:42:32 +02:00
Julien Fontanet
ef942a6209 feat(Backup NG): implement logs and reports (#2869) 2018-05-15 14:40:11 +02:00
Nicolas Raynaud
fdde916388 feat(xo-web/vms-import): redirect to VM or home page (#2942)
If a single VM has been imported, redirect to its page.

If multiple VMs have been imported, redirect to the homepage with all other VMs filtered out.
2018-05-14 17:42:11 +02:00
Julien Fontanet
31314d201b fix(xo-server/backupNg/delta): await deletion/merge 2018-05-14 15:38:11 +02:00
Julien Fontanet
a29a949c51 fix(xo-server/backupNg/delta): deleteFirst iff retention > 1 2018-05-14 15:37:09 +02:00
Julien Fontanet
cc1ce8c5f8 chore: update yarn.lock 2018-05-14 13:53:03 +02:00
Nicolas Raynaud
a21bf4ebe5 chore: major VHD code restructuring (#2808)
Related to #2792
2018-05-14 13:48:16 +02:00
Julien Fontanet
3d0420dbd9 fix(xo-server/backupNg): clean metadata on VM iself (#2945) 2018-05-14 11:47:34 +02:00
Julien Fontanet
04c74dd30f fix: missing bit of last commit 2018-05-11 20:17:02 +02:00
Julien Fontanet
2f256291ae fix(xo-server/backup legacy/delta import): autofix path (#2941)
Because the path might be incorrect and be `_full.vhd` instead of `_delta.vhd`.

I know…
2018-05-11 20:16:00 +02:00
Julien Fontanet
bcb66a4145 fix(xo-server/backup NG/listReplicatedVms): avoid templates and snapshots 2018-05-11 18:45:47 +02:00
Fabrice Marsaud
2d9368062e fix(xo-web/xoa-updater): dont block paying plans when updater is on error (#2939) 2018-05-11 17:18:24 +02:00
Pierre Donias
b110bacf61 fix(xo-server/patches): bulk install (#2935) 2018-05-09 17:32:11 +02:00
Julien Fontanet
78afdc0af5 feat(xo-web): 5.19.1 2018-05-07 13:19:59 +02:00
Julien Fontanet
ad6cd7985a feat(xo-server): 5.19.3 2018-05-07 13:07:21 +02:00
Julien Fontanet
a61661776d fix(xo-server/listVmBackupsNg): handle missing vhds field
This field is only present on delta backups
2018-05-07 11:02:14 +02:00
Julien Fontanet
1a9ebddcab fix(xo-server/listBackupNgPartitionFiles): missing await
Strange things though, it works in dev compilation mode…

Fixes #2929
2018-05-07 10:33:44 +02:00
Julien Fontanet
7ab907a854 feat(xo-server/backup NG): file restore (#2889) 2018-05-06 18:38:47 +02:00
Olivier Lambert
68a34f7cdb feat(changelog): update changelog 2018-05-05 12:05:56 +02:00
Rajaa.BARHTAOUI
da4ff3082d feat(xo-web/health): list VM snapshots related to non-existing backup jobs (#2899)
Fixes #2828
2018-05-04 15:59:11 +02:00
Rajaa.BARHTAOUI
9c05a59b5f feat(xo-web/SR/advanced): add VDI UUID in coalesce table (#2919)
Fixes #2903
2018-05-04 12:28:15 +02:00
Rajaa.BARHTAOUI
6780146505 feat(xo-web/patches): better "restart host" warnings (#2909)
Fixes #2866
2018-05-04 10:41:02 +02:00
Julien Fontanet
2758833fc6 feat(xo-server): 5.19.2 2018-05-03 19:12:48 +02:00
Julien Fontanet
2786d7ec46 fix(vhd/createSyntheticReadStream): sectorsPerBlock → sectorsPerBlockData 2018-05-03 19:11:01 +02:00
Julien Fontanet
945a2006c9 feat(xo-server/legacy backup/import): use synthetic stream (#2913) 2018-05-02 17:48:13 +02:00
badrAZ
b9e574e32f fix(SR/tab-stats): fix IOPS's and IOwait's values format (#2914) 2018-05-02 14:17:13 +02:00
Julien Fontanet
34f1ef1680 feat(xo-server): 5.19.1 2018-05-01 17:26:15 +02:00
Julien Fontanet
4ac4310bc1 fix(xo-server/importDeltaVm): remove extra return
It prevented the VBD creation in case of a new VDI.
2018-05-01 17:25:46 +02:00
Julien Fontanet
a10997ca66 feat(xo-web): 5.19.0 2018-05-01 16:13:20 +02:00
Julien Fontanet
0e52a4c7dc feat(xo-server): 5.19.0 2018-05-01 16:12:44 +02:00
Julien Fontanet
a4b3e22c2b feat(xo-server-perf-alert): 0.1.0 2018-05-01 16:10:10 +02:00
Julien Fontanet
441bd7c754 feat(xo-server-auth-saml): 0.5.2 2018-05-01 16:09:06 +02:00
badrAZ
ddbd32d1cb chore(xo-web/backup-ng/new): single effect to toggle modes (#2908) 2018-04-30 11:19:45 +02:00
Pierre Donias
a5b0cbeaea feat(xo-server-perf-alert): SR disk space (#2905) 2018-04-27 17:38:19 +02:00
Rajaa.BARHTAOUI
c6f3b2b1ce feat(xo-web/XOA update): display "Downgrade" when trial is over (#2845)
Fixes #1483
2018-04-27 10:05:27 +02:00
Pierre Donias
3d869d9fa1 chore(xo-web/health): remove irrelevant actions on VDIs (#2882) 2018-04-26 17:37:28 +02:00
Julien Fontanet
7a5229741f chore: disable tests on Node 9 due to upath error 2018-04-26 17:21:00 +02:00
Rajaa.BARHTAOUI
78e0c2d8fa feat(xo-web/SortedTable): support link actions (#2893) 2018-04-26 15:46:10 +02:00
Pierre Donias
5928984069 feat(xo-web/home): sort by container name (#2901)
Fixes #2680
2018-04-26 15:21:48 +02:00
Julien Fontanet
61a472f108 chore(xo-server/vhd/createReadStream): improve genericity (#2865)
It should now be pretty easy to make it work to generate a delta VHD, which should be very useful when merging multiple deltas together (during deletion).
2018-04-24 18:06:58 +02:00
Julien Fontanet
e45f78ea20 fix(xo-web/backup-ng): delete backups sequentially (#2855)
- sequentially: to limit merge issues
- from newest to oldest: to avoid unnecessary merges
2018-04-23 16:35:34 +02:00
Olivier Lambert
b3ae9d88eb feat(vm): expose vendor device features in advanced tab. Fixes #2883 (#2894) 2018-04-23 15:02:40 +02:00
Pierre Donias
f7f26537be fix(xo-web/vm/network): bad import after file name change (#2892)
Fixes #2891

Introduced by 769c32a1b1
2018-04-23 14:08:56 +02:00
Julien Fontanet
96848fc6d4 fix(xo-server/importDeltaVm): create VBDs earlier (#2885)
To avoid orphan VDIs AMAP.
2018-04-20 17:33:36 +02:00
Julien Fontanet
51e6f0c79f feat(xo-server-usage-report): 0.4.2 2018-04-19 09:39:47 +02:00
badrAZ
4f94ad40b7 fix(xo-server-usage-report): handle missing metrics (#2880) 2018-04-18 16:30:30 +02:00
Pierre Donias
43e1eb9939 fix(xo-web/Ellipsis): handle patchedRender wrapping (#2881)
Also use Ellipsis on resource set name in home/VM view
2018-04-18 11:00:09 +02:00
Julien Fontanet
1f6d7de861 feat(xo-server-usage-report): 0.4.1 2018-04-17 14:03:32 +02:00
Julien Fontanet
bd623c2daf feat(xo-web): 5.18.3 2018-04-17 11:36:00 +02:00
Pierre Donias
40c71c2102 fix(xo-web/SelectSr): "pool" typo (#2878)
Fixes #2875
2018-04-17 11:05:56 +02:00
Nicolas Raynaud
72a1580eff fix(xo-server/vhd-merge.integ.spec): increase timeout (#2874) 2018-04-17 11:04:30 +02:00
Julien Fontanet
9e2404a0d7 feat(xo-web): 5.18.2 2018-04-16 17:32:24 +02:00
Julien Fontanet
7dd84d1518 feat(xo-server): 5.18.3 2018-04-16 17:29:02 +02:00
Julien Fontanet
d800db5d09 fix(xo-web/backup-ng/new): fix empty srs & remotes
Send `undefined` instead of `false`.
2018-04-16 17:26:27 +02:00
Julien Fontanet
2714ccff38 fix(xo-server/backupNg.{create,edit}Job): check srs param 2018-04-16 17:15:35 +02:00
Julien Fontanet
1d493e411b fix(xo-server/backups-ng): correctly detect delta exports
Fixes #2833
2018-04-16 16:54:55 +02:00
Julien Fontanet
2a0c222f2d chore(xo-server): use xen-api 0.16.9 2018-04-16 16:30:59 +02:00
Julien Fontanet
641d68de0e feat(xen-api): 0.16.9 2018-04-16 16:29:41 +02:00
Julien Fontanet
2dd0fd660b chore(xo-server/backups-ng): update todo list 2018-04-16 16:28:09 +02:00
badrAZ
bb5441c7bc feat(xo-web/SelectSr): add container name to SRs that have the same names (#2824)
Fixes #1762
2018-04-16 16:16:55 +02:00
badrAZ
eeea9e662b fix(xo-web/backup-ng/new): rename edit button and change cancel icon (#2858)
See #2711
2018-04-16 15:49:54 +02:00
badrAZ
8d4874e240 fix(xo-web/backupNg/new): make the default retention equals 1 (#2872)
See #2711
2018-04-16 15:27:55 +02:00
badrAZ
a8ba4a1a8e feat(xo-web): stats for SRs (#2847) 2018-04-16 14:40:00 +02:00
Julien Fontanet
0c027247ec fix(normalize-packages): homepage for scoped packages 2018-04-15 23:41:27 +02:00
badrAZ
164cb39c1b fix(xo-web/backup/new): schedules values can be null (#2773) 2018-04-13 17:10:03 +02:00
Julien Fontanet
52503de645 fix(xo-web/initial fetch): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Julien Fontanet
83b8b5de61 fix(xo-web/updater): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Rajaa.BARHTAOUI
3e326c4e62 feat(xo-web/updater): disable upgrade button when not needed (#2816)
Fixes #1594
2018-04-13 16:46:58 +02:00
Julien Fontanet
a6b0690416 fix(xo-server): unmanaged VDI snapshots are VDI-unmanaged 2018-04-13 11:36:10 +02:00
Julien Fontanet
dcd007c5c7 fix(xen-api): fix sync test in watchTask (#2868) 2018-04-12 18:02:51 +02:00
Julien Fontanet
eb090e4874 fix(xen-api): getObject* should not return null 2018-04-12 11:06:08 +02:00
Julien Fontanet
4b716584f7 feat(xo-server): 5.18.2 2018-04-11 17:48:59 +02:00
Julien Fontanet
4bc348f39f fix(xo-server/vhd/createReadStream): emit empty if missing sectors 2018-04-11 17:47:43 +02:00
Julien Fontanet
9c75992fe4 feat(xo-web): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
4bb2702ac5 feat(xo-server): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
ea8133cb41 fix(xo-server/vhd/createReadStream): handle unallocated blocks (#2859)
Fixes #2857
2018-04-11 17:24:46 +02:00
Pierre Donias
fc40c7b03d fix(xo-web/new SR): create button not showing (#2854)
Fixes #2853
2018-04-11 10:21:06 +02:00
Julien Fontanet
7fe5b66fdb feat(xo-server-auth-saml): log profile when no name found 2018-04-10 19:09:30 +02:00
Julien Fontanet
0f1d052493 chore: update dependencies 2018-04-09 18:11:53 +02:00
badrAZ
56a182f795 fix(xo-web/backup-ng/new): dont add a target more than once (#2849)
Fixes #2848
2018-04-09 17:22:38 +02:00
Julien Fontanet
e8da1b943b fix(xo-server/backups-ng): create all forks at the same time (#2842)
Fixes #2790
2018-04-09 16:42:05 +02:00
Julien Fontanet
3913b0eba1 feat(xen-api): 0.16.8 2018-04-09 13:58:52 +02:00
Julien Fontanet
7990e45095 fix(xen-api): allow UUIDs for ro calls 2018-04-09 13:56:47 +02:00
Julien Fontanet
a7068ec166 fix(xo-server/importDeltaVm): better network matching (#2834)
Fixes #2093
2018-04-07 01:00:19 +02:00
Pierre Donias
55b35ac0cf NFS version and options (#2841)
Add NFS version & NFS options. Fixes #2706
2018-04-06 17:46:18 +02:00
Julien Fontanet
a251f8ca75 fix(xo-server/backups-ng): don't remove startable VMs (#2840)
Fixes #2724
2018-04-06 17:12:36 +02:00
Rajaa.BARHTAOUI
172ce2c7a1 feat(xo-web/jobs/new): use SortedTable (#2670)
See #2416
2018-04-06 16:45:46 +02:00
Olivier Lambert
3cef668a75 feat(xo-web,xo-server): create HBA SR (#2836)
Fixes #1992
2018-04-06 16:01:48 +02:00
Olivier Lambert
e6deb29070 fix(SR): incorrect case in deviceConfig for iSCSI probe (#2839) 2018-04-06 15:03:04 +02:00
Olivier Lambert
51609d45a2 feat(xo-web,xo-server): expose VM Xen Tools version (#2838)
Fixes #2650
2018-04-06 14:26:44 +02:00
Rajaa.BARHTAOUI
5cb6dc6d92 feat(xo-web): create new disk from SR view (#2726)
Fixes #2229
2018-04-06 13:54:32 +02:00
Nicolas Raynaud
c5174a61b7 chore(xo-server/debounce): reduce test flakiness (#2831) 2018-04-06 10:14:25 +02:00
badrAZ
93e987982c fix(xo-web/logs): displays the correct calls state when the job is interrupted (#2734)
Fixes #2732
2018-04-05 16:46:43 +02:00
Julien Fontanet
fc421428fd fix: missing ESLint config changes 2018-04-05 16:15:26 +02:00
Julien Fontanet
7400bd657a chore: coding style fixes 2018-04-05 15:53:57 +02:00
Julien Fontanet
da62cba3f8 chore: update dependencies 2018-04-05 11:00:03 +02:00
Patrick Tully
461cc7e547 fix(xo-web/icons.scss): remove extra commas (#2817) 2018-04-05 10:57:18 +02:00
badrAZ
b898ed4785 feat(xo-server/xapi-stats): new implementation (#2648) 2018-04-04 14:20:30 +02:00
Julien Fontanet
149530e73f feat(cron): 1.0.3 2018-04-03 17:21:21 +02:00
Julien Fontanet
7e627c953e fix(cron): selecting the first sunday of the month 2018-04-03 17:21:21 +02:00
Pierre Donias
bc86984f19 chore(xo-server/createNetwork): set other_config.automatic to false (#2825)
Fixes #2818

If a network has its other_config.automatic value set to any value other than
false then XenCenter's New VM wizard will create a VIF connected to this network
See https://citrix.github.io/xenserver-sdk/#network
2018-04-03 15:32:39 +02:00
Julien Fontanet
e40f3acdd4 feat(xo-web): 5.18.0 2018-03-30 18:04:11 +02:00
Julien Fontanet
63d93224e0 feat(xo-server): 5.18.0 2018-03-30 18:03:53 +02:00
badrAZ
c87356c319 feat: ability to delete a default template (#2812)
Fixes #2666
2018-03-30 18:03:12 +02:00
Julien Fontanet
74f4a83aea feat(xo-server-usage-report): 0.4.0 2018-03-30 17:55:57 +02:00
Julien Fontanet
e67038a04d feat(xo-server-auth-saml): 0.5.1 2018-03-30 17:54:30 +02:00
badrAZ
1fa73b57a2 feat(xo-web/dashboard/overview): add filters for pools and hosts (#2769)
Fixes #1631
2018-03-30 17:44:36 +02:00
badrAZ
73c746fdd3 fix(xo-web/backup-ng/new): xoa plan verification (#2813) 2018-03-30 17:22:21 +02:00
Julien Fontanet
ab1413b741 feat(xen-api): more info to task destroyed before completion error 2018-03-30 15:28:53 +02:00
Julien Fontanet
c087eaf229 chore(xo-server): increase blocked threshold from 10 to 50 2018-03-30 15:09:47 +02:00
Julien Fontanet
8b9f9ffa3e feat(xo-server/snapshotVm): increase concurrency to 2 2018-03-30 12:39:51 +02:00
Julien Fontanet
a83fa90d87 chore(xo-server/snapshotVm): avoid using waitObjectState 2018-03-30 12:39:50 +02:00
Julien Fontanet
505f06c1d8 chore(xo-server/backups-ng): dont fork streams if 1 target 2018-03-30 12:39:50 +02:00
Julien Fontanet
2ac1093543 chore(xo-server/backups-ng): rm unneeded defer decorators 2018-03-30 12:39:50 +02:00
Pierre Donias
b3d8ce2041 feat(xo-web/new-vm): hide IP field if IP pools are not configured (#2811)
Fixes #2739
2018-03-29 17:19:38 +02:00
Rajaa.BARHTAOUI
b47789bf82 feat(xo-web): confirm modal before manual backup run (#2717)
Fixes #2355
2018-03-29 15:06:50 +02:00
Julien Fontanet
0a5e1a9bce fix(xo-server/backups-ng): discriminate replicated against the VM (#2809)
Fixes #2807
2018-03-29 13:47:16 +02:00
Julien Fontanet
f333679319 fix(xo-server/backups-ng): dont snapshot on unhealthy vdi chain 2018-03-29 10:51:22 +02:00
Julien Fontanet
20d3faa306 fix(xo-server/backups-ng): delete unused snapshot on delta failure 2018-03-29 10:49:48 +02:00
Julien Fontanet
cf11ed0830 fix(xo-server/backups-ng): dont delete snapshot on failure 2018-03-29 09:47:32 +02:00
Julien Fontanet
acd390ac42 todo(xo-server/backups-ng): do not delete rolling snapshot in case of failure 2018-03-28 17:52:07 +02:00
badrAZ
8a2fbe3ab5 feat(xo-web/backup): ability to migrate legacy to NG (#2801)
Fixes #2711
2018-03-28 14:33:43 +02:00
Julien Fontanet
7a6e7ec153 fix(xo-web/backup-ng): display ids like in logs 2018-03-28 11:48:42 +02:00
Julien Fontanet
7d90346c91 feat(xen-api): 0.16.7 2018-03-28 11:46:45 +02:00
Julien Fontanet
abb5193ced chore(xen-api/getObject*): clearer error messages 2018-03-28 11:46:45 +02:00
Julien Fontanet
52e845834e chore(xen-api): more explicit tests 2018-03-28 11:46:45 +02:00
Julien Fontanet
c1c17fad44 fix(xen-api/getObject): match obj.$id against refs 2018-03-28 11:46:45 +02:00
Julien Fontanet
d7b4025893 todo(xo-server/backups-ng): detect and gc incomplete replications 2018-03-28 11:46:45 +02:00
Rajaa.BARHTAOUI
934356571c feat(xo-web/home): fix toolbar in header (#2798)
Fixes #1581
2018-03-28 11:29:27 +02:00
Julien Fontanet
738d98eb42 chore(xo-server): update http-server-plus to 0.10
Fixes #2803
2018-03-28 00:11:17 +02:00
Nicolas Raynaud
7e689076d8 chore(xo-server/vhd-merge): various updates (#2767)
Fixes #2746 

- implement parent locators
- tests
- remove `@nraynaud/struct-fu`
2018-03-27 18:39:36 +02:00
Rajaa.BARHTAOUI
0b9d031965 feat(xo-web/jobs/overview): use SortedTable (#2677)
See #2416
2018-03-27 16:56:56 +02:00
badrAZ
53f470518b feat(xo-server-usage-report): various improvements (#2788)
Fixes #2770
2018-03-27 16:07:29 +02:00
Rajaa.BARHTAOUI
664d648435 feat(xo-web/vm/disks): use SortedTable (#2429)
See #2416
2018-03-27 11:13:05 +02:00
Julien Fontanet
0d718bd632 feat(xo-server/backup NG): merge VHD in a worker (#2799) 2018-03-27 10:13:05 +02:00
badrAZ
ed5e0c3509 feat(xo-web/xoa/update): warn before upgrade if jobs running (#2795)
Fixes #2250
2018-03-26 18:01:29 +02:00
Julien Fontanet
20d5047b55 chore(xo-server/ag2s): use async-iterator-to-stream instead 2018-03-26 16:32:46 +02:00
Pierre Donias
4cfe3ec06e fix(xo-server/new-vm): race condition on VIFs (#2796)
Fixes #2794
2018-03-26 11:18:36 +02:00
Julien Fontanet
87664ff16a chore(xo-server-auth-saml): config description 2018-03-26 11:10:16 +02:00
Pierre Donias
adf278fc83 fix(xo-web/home): pagination (#2791)
Fixes #2730
2018-03-21 16:54:29 +01:00
Pierre Donias
a4d0fa62d2 chore(xo-web/restore): minor improvements & fixes (#2789)
Fixes #2692
2018-03-21 15:51:12 +01:00
Pierre Donias
ff59d091f1 fix(xo-server-cloud): check token before getResourceDownloadToken call (#2783) 2018-03-20 15:36:03 +01:00
Pierre Donias
4cac99d79a feat(xo-web/home): put sort criteria in URL (#2780)
Fixes #2585
2018-03-20 10:41:03 +01:00
Rajaa.BARHTAOUI
d1a046279d feat(xo-web/modal): autofocus strong-confirm text input (#2749) 2018-03-19 15:34:52 +01:00
Julien Fontanet
cb9fa5c42b chore: update dependencies 2018-03-19 14:55:19 +01:00
Julien Fontanet
05f9e6895b feat(xo-web): 5.17.3 2018-03-16 17:49:31 +01:00
Julien Fontanet
63b5ee6f96 feat(xo-server): 5.17.4 2018-03-16 17:49:11 +01:00
Julien Fontanet
36d2de049f feat(xo-server/vhd): createReadStream (#2763)
A stream to a synthetic full VHD.
2018-03-16 17:47:10 +01:00
Julien Fontanet
86b0d5e2b7 fix(xo-server/backupNg.importVmBackup): do not try to detect base VDI 2018-03-16 17:38:56 +01:00
Julien Fontanet
d34f641130 fix(xo-server/backupNg.importVmBackup): do not try to detect base VM 2018-03-16 17:30:42 +01:00
Julien Fontanet
39d7b4c7bd fix(xo-server/backupNg.importVmBackup): fix VM name for delta 2018-03-16 17:15:18 +01:00
Julien Fontanet
ad0d4156fb fix(xo-server/backupNg.importVmBackup): add missing version for delta 2018-03-16 17:15:18 +01:00
badrAZ
80187e2789 feat(xo-web/self-service): add the internal networks to the networks' select (#2664) 2018-03-16 16:57:44 +01:00
badrAZ
89e25c9b81 fix(xo-web/dashboard/overview): missing patches not fetched (#2772)
Fixes #2768
2018-03-16 16:37:57 +01:00
Julien Fontanet
ca51d59815 chore(xo-server/chainVhd): allow parent and child to be in different dirs (#2762) 2018-03-16 16:24:45 +01:00
Julien Fontanet
433f445e99 fix(xo-server/backups): no checksum files for VHDs (#2761)
Because keeping them up-to-date after chainings and merges is too expensive (requires reading the whole file).

In legacy backups they were kept up-to-date at great cost and never used for verification anyway.
2018-03-16 16:24:25 +01:00
Julien Fontanet
474a765e1b chore(xo-server/chainVhd): remove checksum recomputing (#2759)
It's high time to remove this.
2018-03-16 16:24:02 +01:00
badrAZ
7d4b17380d feat(Backups NG): fourth iteration (#2756) 2018-03-16 16:23:19 +01:00
Julien Fontanet
b58b1d94cd fix(xo-server/xapi): add missing import 2018-03-16 16:21:20 +01:00
Olivier Lambert
16e7257e3b feat(host/pool): clearer memory info (#2771)
Fixes #2750
2018-03-16 14:19:59 +01:00
Julien Fontanet
ca1a46f980 chore(xo-server/backups-ng): add todo items 2018-03-14 15:52:26 +01:00
Julien Fontanet
596bd12f59 chore(xo-server/vhd): add format cheatsheet 2018-03-14 15:51:02 +01:00
badrAZ
301ab65c01 fix(xo-web/backup/overview): fix the race condition between subscriptions (#2766)
Fixes #2733
2018-03-14 14:15:47 +01:00
Pierre Donias
35f210e074 fix(xo-server/xosan): make tmpBoundObjectId unique (#2760)
Fixes #2758
2018-03-13 16:05:38 +01:00
Julien Fontanet
c239b518e0 chore(xo-server/checksum): documentation 2018-03-13 15:57:08 +01:00
Julien Fontanet
f45935aa44 chore(xo-server/vhd-merge): abstract FD handling (#2757)
Due to our smart implementation, the Vhd class does not need to be aware of the fact that the file is already opened.
2018-03-13 15:09:21 +01:00
Julien Fontanet
782505b292 feat(xo-server): close inactive HTTP connections on stop 2018-03-13 10:22:06 +01:00
Julien Fontanet
1368e3b86d chore: update dependencies 2018-03-13 10:06:23 +01:00
Julien Fontanet
ab9c24401e feat(xo-web): 5.17.2 2018-03-12 17:54:09 +01:00
Julien Fontanet
831f4e48d1 feat(xo-server): 5.17.3 2018-03-12 17:53:34 +01:00
Julien Fontanet
f5511449af fix(xo-server/vm.create): work around a race condition (#2755)
Fixes #2747
2018-03-12 17:52:53 +01:00
Julien Fontanet
80c1e39b53 feat(Backups NG): third iteration (#2729) 2018-03-12 17:26:20 +01:00
badrAZ
3ce4e86784 fix(xo-web/sorted-table): returns undefined if userData is empty (#2752)
Fixes #2748
2018-03-12 17:19:15 +01:00
Julien Fontanet
fb617418bb feat(xo-server): 5.17.2 2018-03-09 19:12:00 +01:00
Pierre Donias
9fb0f793b2 fix(prettierrc): add trailingComma to avoid conflicts with eslint (#2744) 2018-03-09 11:55:02 +01:00
Julien Fontanet
3b21a097ab fix(xo-web): handle incorrect filters (#2743)
Fixes #2740
2018-03-09 11:30:22 +01:00
Rajaa.BARHTAOUI
ef09a42a89 feat(xo-web): disconnect VDI from Health view (#2655)
See #2505
2018-03-08 14:48:30 +01:00
Julien Fontanet
74d8f2a859 fix(xo-server): test with Babel 7 2018-03-08 11:00:00 +01:00
Julien Fontanet
48910f9c0f fix(xo-server/remote-handlers): do not swallow sync() value 2018-03-08 10:51:24 +01:00
Julien Fontanet
788a1accbd feat(xo-server): update to Babel 7 (#2731) 2018-03-08 10:11:14 +01:00
Julien Fontanet
b254e7e852 chore: update dependencies 2018-03-08 00:13:54 +01:00
Julien Fontanet
e288fa1b8a feat(xo-web): 5.17.1 2018-03-07 21:22:18 +01:00
Julien Fontanet
eb9ec68494 feat(xo-server): 5.17.1 2018-03-07 21:21:33 +01:00
Julien Fontanet
10ab4f2d79 fix(xo-server): work around minor Babel issue 2018-03-07 21:11:20 +01:00
badrAZ
b1986dc275 feat(Backups NG): second iteration (#2718) 2018-03-07 20:57:28 +01:00
Julien Fontanet
831e36ae5f fix(xo-server/exportDeltaVm): cannot assign ro name_label 2018-03-07 20:41:20 +01:00
Julien Fontanet
77a2d37d98 fix(xo-server/exportDeltaVm): do not leak the snapshot name
Fixes #2727
2018-03-07 20:30:09 +01:00
Julien Fontanet
37b90e25dc fix(xo-server/jobs): userIds are strings
Fixes #2728
2018-03-07 20:17:22 +01:00
Julien Fontanet
41f16846b6 chore(xo-server): addChecksumToReadStream → createChecksumStream (#2725)
`addChecksumToReadStream` was overly complicated and its usage was limited.

`createChecksumStream` is similar but does not pipe the readable stream in by itself.
2018-03-06 17:48:21 +01:00
Julien Fontanet
3e89c62e72 chore(xo-server): replace eventToPromise with fromEvent 2018-03-06 16:40:29 +01:00
Julien Fontanet
b7d3762c06 chore(xo-server): delete unused schedules on clean 2018-03-06 16:39:00 +01:00
Julien Fontanet
481bc9430a chore(xo-server/utils): remove unnecessary moment-timezone import 2018-03-06 16:38:07 +01:00
Julien Fontanet
13f2470887 chore(xo-server): remove createRawObject
Replace both `createRawObject()` and `Object.create()` by `{ __proto__: null }`.
2018-03-06 16:36:41 +01:00
Julien Fontanet
0308fe4e6e chore(xo-server): add checksum handling for VM import 2018-03-06 16:36:13 +01:00
Julien Fontanet
197273193e chore(xo-server): explicitly check for a schedule 2018-03-06 16:35:42 +01:00
Julien Fontanet
e4b11a793b chore(xo-server): move checksum streams into own module 2018-03-06 16:34:22 +01:00
Julien Fontanet
927d3135c4 chore(xo-server): rename removeSchedule to deleteSchedule 2018-03-06 16:32:59 +01:00
Julien Fontanet
aa533c20d6 fix(xo-server): respect compression param 2018-03-06 16:31:11 +01:00
Julien Fontanet
7fd615525a chore(xen-api): TODO do not cancel a finished task 2018-03-06 16:26:05 +01:00
Julien Fontanet
6abf3fc0af feat: add code of conduct 2018-03-06 10:18:05 +01:00
Julien Fontanet
6bb0929822 chore(xo-server/backupNg): remove unnecessary destructuring 2018-03-03 10:51:57 +01:00
Julien Fontanet
feebc04e55 chore(xo-server/BackupsNg): remove schedule default value
A backup NG job cannot be run without a schedule anyway
2018-03-03 10:26:45 +01:00
Julien Fontanet
2d406cd7c1 chore(xo-server/backupNg): rename importVmbackup{Ng,} 2018-03-03 10:25:01 +01:00
Julien Fontanet
788bfe632f chore(xo-server/exportDeltaVm): pass cancel token to _snapshotVm 2018-03-03 10:21:10 +01:00
Julien Fontanet
1149102f37 chore(xo-server/exportDeltaVm): pass name to _snapshotVm
Instead of setting it manually afterward.
2018-03-03 10:20:35 +01:00
Julien Fontanet
8bd949f618 chore(xo-server/exportDeltaVm): use _snapshotVm directly 2018-03-03 10:19:51 +01:00
Julien Fontanet
489b142a66 chore(xo-server): remove unnecessary getObject in exportDeltaVm 2018-03-03 10:15:24 +01:00
Julien Fontanet
cbbbb6da4f chore(xo-server): doc attrs of VMs created by Backup NG 2018-03-03 10:14:06 +01:00
Julien Fontanet
6701c7e3af chore(xo-server): use checksumFile helper unlink 2018-03-03 10:07:09 +01:00
Julien Fontanet
ecd460a991 feat(xo-web): 5.17.0 2018-03-02 19:57:24 +01:00
Julien Fontanet
b4d7648ffe feat(xo-server): 5.17.0 2018-03-02 19:57:04 +01:00
Julien Fontanet
eb3dfb0f30 feat(Backups NG): first iteration (#2705) 2018-03-02 19:56:08 +01:00
Julien Fontanet
2b9ba69480 fix(xo-server): getJob return the correct job 2018-03-02 19:53:16 +01:00
Julien Fontanet
8f784162ea chore(xo-server): Xapi#exportDeltaVm make streams writable 2018-03-02 19:52:35 +01:00
Julien Fontanet
a2ab64b142 chore(xo-server): Xapi#exportDeltaVm accept a snapshot 2018-03-02 19:52:00 +01:00
Julien Fontanet
052817ccbf chore(xo-server): RemoteHandler#rename handle cheksum 2018-03-02 19:51:03 +01:00
Julien Fontanet
48b2297bc1 chore(xo-server): handle nested job props (#2712) 2018-03-02 19:29:08 +01:00
Nicolas Raynaud
e76a0ad4bd feat(xo-server): improve VHD merge speed (#2643)
Avoid re-opening/closing the files multiple times, introduce a lot of latency in remote FS.
2018-03-02 19:08:01 +01:00
Olivier Lambert
baf6d30348 fix(changelog): remove useless spaces 2018-03-02 18:31:32 +01:00
Olivier Lambert
7d250dd90b feat(changelog): move and update changelog 2018-03-02 18:30:22 +01:00
Rajaa.BARHTAOUI
efaabb02e8 feat(xo-web): confirm modal before host emergency shutdown (#2714)
Fixes #2230
2018-03-02 18:05:58 +01:00
Julien Fontanet
0c3b98d451 fix(xo-server): forward createOutputStream errors with checksum 2018-03-02 15:29:26 +01:00
Julien Fontanet
28d1539ea6 fix(xo-server): fix Xapi#snapshotVm
It was broken by #2701.
2018-03-02 10:53:49 +01:00
Julien Fontanet
8ad02d2d51 feat(xo-web): ActionButton accept data-* props instead of handlerParam (#2713) 2018-03-02 09:57:26 +01:00
Julien Fontanet
1947a066e0 chore: disable flow for test
Still some config issues which I have to fix.
2018-03-01 16:30:02 +01:00
Julien Fontanet
d99e643634 chore(xo-server): inject schedule in jobs (#2710) 2018-03-01 16:27:51 +01:00
Rajaa.BARHTAOUI
65e1ac2ef9 chore(xo-web): consistently use "Username" label (#2709)
Fixes #2651
2018-03-01 15:58:48 +01:00
Julien Fontanet
64a768090f fix(xo-server): typo, executor → executors 2018-03-01 13:37:40 +01:00
Julien Fontanet
488eed046e chore(xo-server): pluggable job executors (#2707) 2018-03-01 12:10:08 +01:00
Julien Fontanet
dccddd78a6 chore(xo-web): rewrite smart-backup-pattern (#2698)
Fix a few issues
2018-02-28 17:07:16 +01:00
Julien Fontanet
3c247abcf9 chore(xo-web): add exact prop to NavLink (#2699) 2018-02-28 17:05:44 +01:00
Julien Fontanet
db795e91fd feat(complex-matcher): 0.3.0 2018-02-28 16:40:18 +01:00
Julien Fontanet
f060f56c93 feat(complex-matcher): number comparison (#2702)
`foo:>=42` matches `{ foo: 42 }` but not `"bar"` nor `{ foo: 37 }`.
2018-02-28 16:36:54 +01:00
Julien Fontanet
51be573f5e chore(xo-web): rewrite smart-backup-pattern 2018-02-28 16:23:29 +01:00
Julien Fontanet
4257cbb618 chore(xo-server): improve jobs code (#2696)
- add type filtering (default to `call`)
- support passing extra params to the call
- Flow typing
2018-02-28 16:22:41 +01:00
Julien Fontanet
e25d6b712d chore(xo-web): addSubscriptions provide initial props (#2697) 2018-02-28 16:09:56 +01:00
Julien Fontanet
b499d60130 chore(xo-server): improve scheduling code (#2695) 2018-02-28 15:59:19 +01:00
Julien Fontanet
68e06303a4 chore(xo-server): more cancelable Xapi methods (#2701) 2018-02-28 15:25:22 +01:00
badrAZ
60085798f2 fix(xo-web/jobs/vm.revert): use the snapshot's id instead of the vm's id (#2685)
Fixes #2498
2018-02-28 14:33:05 +01:00
badrAZ
c62cab39f1 feat(xo-web/VM): change the "share" button position (#2667)
Fixes #2663
2018-02-28 14:10:27 +01:00
Julien Fontanet
30483ab2d9 feat(xo-web): pass userData to SortedTable actions (#2700) 2018-02-28 13:43:41 +01:00
Julien Fontanet
c38c716616 chore(xo-server): use sepecific Babel plugins instead of stage-0 (#2694) 2018-02-28 12:59:23 +01:00
Julien Fontanet
ded1127d64 chore: mutualize Babel 7 config 2018-02-26 22:30:37 +01:00
Julien Fontanet
38d6130e89 chore(xo-cli): remove flow test 2018-02-26 21:58:32 +01:00
Julien Fontanet
ee47e40d1a feat(xo-web/logs): display real job status (#2688) 2018-02-26 18:02:39 +01:00
Julien Fontanet
80e66415d7 feat(xo-server): 5.16.2 2018-02-26 11:26:02 +01:00
Julien Fontanet
81e6372070 feat(xen-api): 0.16.6 2018-02-26 11:23:20 +01:00
Julien Fontanet
dbfbd42d29 fix(xo-server): identifiable names for VM export snapshots
Fixes #2668
2018-02-24 23:00:50 +01:00
Julien Fontanet
e0d34b1747 fix(xo-server): CR with lazy streams (#2675) 2018-02-23 17:50:17 +01:00
Julien Fontanet
9a8f9dd1d7 feat(xo-web): display attached VDI snapshots in Health (#2684)
Fixes #2634
2018-02-23 16:30:40 +01:00
Pierre Donias
75521f8757 fix(xo-server): do not count snapshots in self quotas (#2682)
Fixes #2626
2018-02-23 15:00:23 +01:00
Julien Fontanet
11d4cb2f04 fix(xo-server): detect interruption of full backups (#2686) 2018-02-23 13:07:48 +01:00
Rajaa.BARHTAOUI
d90cb09b56 feat(xo-web): disconnect VDIs from SR/disks view (#2602)
See #2505
2018-02-23 10:03:20 +01:00
Rajaa.BARHTAOUI
a02d393457 fix(xo-web/VM): allow self-service user to insert CD (#2647)
Fixes #2503
2018-02-22 16:42:43 +01:00
Julien Fontanet
01a5963947 feat(xen-api): allow createTask in read-only mode (#2679)
Fixes #2678
2018-02-22 15:50:35 +01:00
Julien Fontanet
7ef314d9f4 chore(lint-staged): rewritten in JS (#2676)
- simpler code, no need to hack around the shell
- no more double formatting
- no longer use git stash, simply cache files in memory
2018-02-22 11:45:44 +01:00
Julien Fontanet
2ff25d1f61 fix(xo-server): limit number of VDI exports (#2673)
Fixes #2672
2018-02-21 19:26:39 +01:00
Julien Fontanet
ede12b6732 fix(xo-server): limit number of VM exports (#2671)
Fixes #2669
2018-02-21 17:37:07 +01:00
Julien Fontanet
8a010f62fd chore(xo-server): remove unused Xapi#exportVdi 2018-02-21 17:32:30 +01:00
badrAZ
51da4a7e70 fix(xo-web/VM): show error when setting resource set fails (#2638)
Fixes #2620
2018-02-20 14:44:24 +01:00
Julien Fontanet
fd2580f5da feat(xo-cli): document config export (#2662) 2018-02-20 11:43:38 +01:00
Julien Fontanet
c5fdab7d47 feat(cron): 1.0.2 2018-02-20 11:42:19 +01:00
Julien Fontanet
ae094438b1 fix(cron): Schedule#next() with moment 2018-02-20 11:41:12 +01:00
Julien Fontanet
3e5af9e894 chore: update dependencies 2018-02-19 18:10:05 +01:00
Julien Fontanet
10093afb91 feat(cron): 1.0.1 2018-02-19 17:06:17 +01:00
Julien Fontanet
58032738b9 chore(cron): replace luxon with moment-timezone (#2657) 2018-02-19 17:04:04 +01:00
Julien Fontanet
89cbbaeeea chore: fix yarn.lock 2018-02-19 15:52:17 +01:00
Julien Fontanet
5ca08eb400 feat(xo-server): 5.16.1 2018-02-19 14:11:10 +01:00
Julien Fontanet
fad049d2ac feat(xo-web): 5.16.2 2018-02-19 14:10:37 +01:00
Julien Fontanet
87466cb5bd feat(value-matcher): 0.2.0 2018-02-19 14:08:38 +01:00
Julien Fontanet
bb69ad8019 feat(xo-server-backup-reports): 0.10.0 2018-02-19 14:02:13 +01:00
Julien Fontanet
d5373b85c7 feat(xen-api): 0.16.5 2018-02-19 13:59:17 +01:00
Julien Fontanet
d71e699582 feat(xo.getAllObjects): implement ndjson handler (#2500) 2018-02-19 13:41:44 +01:00
Julien Fontanet
f464751752 fix(xen-api): enforce event timeout (#2653) 2018-02-19 11:45:10 +01:00
badrAZ
d69f355206 feat(xo-web/jobs): show number of VMs in logs status selector (#2636)
Fixes #2618
2018-02-19 11:05:03 +01:00
badrAZ
769c32a1b1 fix(xo-web): the name 'ip' is used by the module ip-regex (#2654) 2018-02-19 10:05:10 +01:00
Julien Fontanet
44075d2ad5 chore: update dependencies 2018-02-18 21:53:23 +01:00
badrAZ
2b4ae3f1c2 feat(xo-server-backup-reports): improve the unhealthy VDI chain message (#2640)
Fixes #2639
2018-02-16 15:24:55 +01:00
Julien Fontanet
8689b48c55 fix(xo-server/authentication): fail fast with empty passwords
There is no reason to attempt authentication with empty passwords, and this work around issues with some LDAP servers which may allow binds with empty passwords.

See xoa-support#469.
2018-02-16 11:47:01 +01:00
Julien Fontanet
443882f4be feat(scripts): dev 2018-02-15 15:20:06 +01:00
Pierre Donias
9c3adaedf0 fix(select-objects/SelectSshKey): initialize state (#2646)
Fixes #2645
2018-02-15 12:38:04 +01:00
Julien Fontanet
4ba4d383f0 feat(backup): parallelism improvements (#2641)
Do not limit the concurrency at the job level, only limit VHD merges (2) and VM snapshots (1).
2018-02-15 12:14:39 +01:00
Julien Fontanet
947febcdaa chore(xo-server): update limit-concurrency-decorator to 0.3.0 2018-02-14 17:05:25 +01:00
badrAZ
b0a152612e feat(backup): new "skipped" state (#2612)
Fixes #2591
2018-02-14 11:22:30 +01:00
badrAZ
c9991655cf fix(xo-server): remove VM from nonexistent resource sets (#2637) 2018-02-13 16:58:05 +01:00
badrAZ
8971d218bb feat: share a vm in a resource set (#2611)
Fixes #2589
2018-02-13 15:24:20 +01:00
Julien Fontanet
8b05486945 fix(xo-server/scheduling): use local TZ by default 2018-02-13 14:50:10 +01:00
Julien Fontanet
42cd5d8e58 chore(xo-server/utils): remove scheduleFn 2018-02-13 14:49:50 +01:00
Julien Fontanet
31f420937e chore: fix yarn.lock 2018-02-13 14:41:12 +01:00
badrAZ
952d086c5e feat: use @xen-orchestra/cron (#2619)
Fixes #2616
2018-02-13 14:39:26 +01:00
Julien Fontanet
422a04795f chore: update dependencies 2018-02-13 12:05:52 +01:00
Julien Fontanet
28a8807cfb chore(xo-web): remove unused package.json entries 2018-02-13 11:16:02 +01:00
Julien Fontanet
c6327a953b chore(cron): improve documentation 2018-02-13 10:44:21 +01:00
Julien Fontanet
cc71f2e4a3 feat(cron): 1.0.0 2018-02-12 12:21:22 +01:00
Julien Fontanet
701086ec26 feat(cron): add Schedule#startJob() 2018-02-12 12:17:05 +01:00
Julien Fontanet
20acaebb68 fix(xo-server/vhd-merge): explicit error when no block found 2018-02-11 13:38:24 +01:00
Julien Fontanet
22997cd903 chore: improve Babel 7 configs
- easier to customize
- all target configuration is done in package.json
2018-02-10 23:09:29 +01:00
Julien Fontanet
7cb720b11f chore: format all code (#2632) 2018-02-09 17:56:03 +01:00
362 changed files with 24576 additions and 14380 deletions

View File

@@ -2,13 +2,21 @@ module.exports = {
extends: ['standard', 'standard-jsx'],
globals: {
__DEV__: true,
$Dict: true,
$Diff: true,
$Exact: true,
$Keys: true,
$PropertyType: true,
$Shape: true,
},
parser: 'babel-eslint',
rules: {
'comma-dangle': ['error', 'always-multiline'],
indent: 'off',
'no-var': 'error',
'node/no-extraneous-import': 'error',
'node/no-extraneous-require': 'error',
'prefer-const': 'error',
'react/jsx-indent': 'off',
},
}

View File

@@ -8,6 +8,7 @@
[lints]
[options]
esproposal.decorators=ignore
include_warnings=true
module.use_strict=true

2
.gitignore vendored
View File

@@ -8,6 +8,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/plot.dat
/packages/xo-server/.xo-server.*

View File

@@ -1,4 +1,5 @@
module.exports = {
semi: false,
singleQuote: true,
trailingComma: 'es5',
}

View File

@@ -1,12 +1,18 @@
language: node_js
node_js:
- stable
#- stable # disable for now due to an issue of indirect dep upath with Node 9
- 8
- 6
# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
sudo: false
addons:
apt:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter
before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash
@@ -14,3 +20,7 @@ before_install:
cache:
yarn: true
script:
- yarn run test
- yarn run test-integration

View File

@@ -0,0 +1,63 @@
'use strict'
const PLUGINS_RE = /^(?:@babel\/|babel-)plugin-.+$/
const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
legacy: true,
},
'@babel/preset-env' (pkg) {
return {
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? (() => {
let node = (pkg.engines || {}).node
if (node !== undefined) {
const trimChars = '^=>~'
while (trimChars.includes(node[0])) {
node = node.slice(1)
}
return { node: node }
}
})()
: { browsers: '', node: 'current' },
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
}
},
}
const getConfig = (key, ...args) => {
const config = configs[key]
return config === undefined
? {}
: typeof config === 'function'
? config(...args)
: config
}
module.exports = function (pkg, plugins, presets) {
plugins === undefined && (plugins = {})
presets === undefined && (presets = {})
Object.keys(pkg.devDependencies || {}).forEach(name => {
if (!(name in presets) && PLUGINS_RE.test(name)) {
plugins[name] = getConfig(name, pkg)
} else if (!(name in presets) && PRESETS_RE.test(name)) {
presets[name] = getConfig(name, pkg)
}
})
return {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
}

View File

@@ -0,0 +1,11 @@
{
"private": true,
"name": "@xen-orchestra/babel-config",
"version": "0.0.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
}
}

View File

@@ -1,41 +1,3 @@
'use strict'
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const pkg = require('./package')
let nodeCompat = (pkg.engines || {}).node
if (nodeCompat === undefined) {
nodeCompat = '6'
} else {
const trimChars = '^=>~'
while (trimChars.includes(nodeCompat[0])) {
nodeCompat = nodeCompat.slice(1)
}
}
module.exports = {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: ['lodash'],
presets: [
[
'@babel/env',
{
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? {
browsers: '>2%',
node: nodeCompat,
}
: { node: 'current' },
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
},
],
'@babel/flow',
],
}
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -10,28 +10,6 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/cron)
> npm install --save @xen-orchestra/cron
```
## Usage
```js
import { createSchedule } from '@xen-orchestra/cron'
const schedule = createSchedule('0 0 * * sun', 'America/New_York')
schedule.next(2)
// [ 2018-02-11T05:00:00.000Z, 2018-02-18T05:00:00.000Z ]
const job = schedule.createJob(() => {
console.log(new Date())
})
job.start()
job.stop()
```
> If the scheduled job returns a promise, its resolution (or
> rejection) will be awaited before scheduling the next run.
### Pattern syntax
```
@@ -63,6 +41,75 @@ Step values can be used in conjunctions with ranges. For instance,
> [cron](https://github.com/kelektiv/node-cron), it does not appear to
> be very standard though.
### API
`createSchedule(pattern: string, zone: string = 'utc'): Schedule`
> Create a new schedule.
- `pattern`: the pattern to use, see [the syntax](#pattern-syntax)
- `zone`: the timezone to use, use `'local'` for the local timezone
```js
import { createSchedule } from '@xen-orchestra/cron'
const schedule = createSchedule('0 0 * * sun', 'America/New_York')
```
`Schedule#createJob(fn: Function): Job`
> Create a new job from this schedule.
- `fn`: function to execute, if it returns a promise, it will be
awaited before scheduling the next run.
```js
const job = schedule.createJob(() => {
console.log(new Date())
})
```
`Schedule#next(n: number): Array<Date>`
> Returns the next dates matching this schedule.
- `n`: number of dates to return
```js
schedule.next(2)
// [ 2018-02-11T05:00:00.000Z, 2018-02-18T05:00:00.000Z ]
```
`Schedule#startJob(fn: Function): () => void`
> Start a new job from this schedule and return a function to stop it.
- `fn`: function to execute, if it returns a promise, it will be
awaited before scheduling the next run.
```js
const stopJob = schedule.startJob(() => {
console.log(new Date())
})
stopJob()
```
`Job#start(): void`
> Start this job.
```js
job.start()
```
`Job#stop(): void`
> Stop this job.
```js
job.stop()
```
## Development
```

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "0.2.0",
"version": "1.0.3",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -14,7 +14,7 @@
"scheduling",
"task"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
@@ -30,18 +30,21 @@
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"lodash": "^4.17.4",
"luxon": "^0.4.0"
"moment-timezone": "^0.5.14"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.39",
"@babel/core": "7.0.0-beta.39",
"@babel/preset-env": "7.0.0-beta.39",
"@babel/preset-flow": "7.0.0-beta.39",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

View File

@@ -1,4 +1,4 @@
import { DateTime } from 'luxon'
import moment from 'moment-timezone'
import next from './next'
import parse from './parse'
@@ -18,9 +18,10 @@ class Job {
}
const scheduleNext = () => {
const delay = schedule._nextDelay()
this._timeout = delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}
this._scheduleNext = scheduleNext
@@ -40,7 +41,10 @@ class Job {
class Schedule {
constructor (pattern, zone = 'utc') {
this._schedule = parse(pattern)
this._dateTimeOpts = { zone }
this._createDate =
zone.toLowerCase() === 'utc'
? moment.utc
: zone === 'local' ? moment : () => moment.tz(zone)
}
createJob (fn) {
@@ -50,17 +54,23 @@ class Schedule {
next (n) {
const dates = new Array(n)
const schedule = this._schedule
let date = DateTime.fromObject(this._dateTimeOpts)
let date = this._createDate()
for (let i = 0; i < n; ++i) {
dates[i] = (date = next(schedule, date)).toJSDate()
dates[i] = (date = next(schedule, date)).toDate()
}
return dates
}
_nextDelay () {
const now = DateTime.fromObject(this._dateTimeOpts)
const now = this._createDate()
return next(this._schedule, now) - now
}
startJob (fn) {
const job = this.createJob(fn)
job.start()
return job.stop.bind(job)
}
}
export const createSchedule = (...args) => new Schedule(...args)

View File

@@ -1,50 +1,49 @@
import moment from 'moment-timezone'
import sortedIndex from 'lodash/sortedIndex'
import { DateTime } from 'luxon'
const NEXT_MAPPING = {
month: { year: 1 },
day: { month: 1 },
weekday: { week: 1 },
date: { month: 1 },
day: { week: 1 },
hour: { day: 1 },
minute: { hour: 1 },
}
const getFirst = values => values !== undefined ? values[0] : 0
const getFirst = values => (values !== undefined ? values[0] : 0)
const setFirstAvailable = (date, unit, values) => {
if (values === undefined) {
return date
return
}
const curr = date.get(unit)
const next = values[sortedIndex(values, curr) % values.length]
if (curr === next) {
return date
return
}
const newDate = date.set({ [unit]: next })
return newDate > date ? newDate : newDate.plus(NEXT_MAPPING[unit])
const timestamp = +date
date.set(unit, next)
if (timestamp > +date) {
date.add(NEXT_MAPPING[unit])
}
return true
}
// returns the next run, after the passed date
export default (schedule, fromDate) => {
let date = fromDate
let date = moment(fromDate)
.set({
second: 0,
millisecond: 0,
})
.plus({ minute: 1 })
.add({ minute: 1 })
const { minute, hour, dayOfMonth, month, dayOfWeek } = schedule
date = setFirstAvailable(date, 'minute', minute)
setFirstAvailable(date, 'minute', minute)
let tmp
tmp = setFirstAvailable(date, 'hour', hour)
if (tmp !== date) {
date = tmp.set({
minute: getFirst(minute),
})
if (setFirstAvailable(date, 'hour', hour)) {
date.set('minute', getFirst(minute))
}
let loop
@@ -52,30 +51,30 @@ export default (schedule, fromDate) => {
do {
loop = false
tmp = setFirstAvailable(date, 'month', month)
if (tmp !== date) {
date = tmp.set({
day: 1,
if (setFirstAvailable(date, 'month', month)) {
date.set({
date: 1,
hour: getFirst(hour),
minute: getFirst(minute),
})
}
let newDate = date.clone()
if (dayOfMonth === undefined) {
if (dayOfWeek !== undefined) {
tmp = setFirstAvailable(date, 'weekday', dayOfWeek)
setFirstAvailable(newDate, 'day', dayOfWeek)
}
} else if (dayOfWeek === undefined) {
tmp = setFirstAvailable(date, 'day', dayOfMonth)
setFirstAvailable(newDate, 'date', dayOfMonth)
} else {
tmp = DateTime.min(
setFirstAvailable(date, 'day', dayOfMonth),
setFirstAvailable(date, 'weekday', dayOfWeek)
)
const dateDay = newDate.clone()
setFirstAvailable(dateDay, 'date', dayOfMonth)
setFirstAvailable(newDate, 'day', dayOfWeek)
newDate = moment.min(dateDay, newDate)
}
if (tmp !== date) {
loop = tmp.month !== date.month
date = tmp.set({
if (+date !== +newDate) {
loop = date.month() !== newDate.month()
date = newDate.set({
hour: getFirst(hour),
minute: getFirst(minute),
})

View File

@@ -1,17 +1,15 @@
/* eslint-env jest */
import mapValues from 'lodash/mapValues'
import { DateTime } from 'luxon'
import moment from 'moment-timezone'
import next from './next'
import parse from './parse'
const N = (pattern, fromDate = '2018-04-09T06:25') =>
next(parse(pattern), DateTime.fromISO(fromDate, { zone: 'utc' })).toISO({
includeOffset: false,
suppressMilliseconds: true,
suppressSeconds: true,
})
const N = (pattern, fromDate = '2018-04-09T06:25') => {
const iso = next(parse(pattern), moment.utc(fromDate)).toISOString()
return iso.slice(0, iso.lastIndexOf(':'))
}
describe('next()', () => {
mapValues(
@@ -43,4 +41,8 @@ describe('next()', () => {
'no solutions found for this schedule'
)
})
it('select the first sunday of the month', () => {
expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
})
})

View File

@@ -90,7 +90,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
if (!match('/')) {
return
}
[start, end] = field.range
;[start, end] = field.range
step = parseInteger()
} else {
start = parseValue()
@@ -173,18 +173,13 @@ export default createParser({
{
aliases: 'jan feb mar apr may jun jul aug sep oct nov dec'.split(' '),
name: 'month',
range: [1, 12],
// this function is applied to numeric entries (not steps)
//
// currently parse month 0-11
post: value => value + 1,
range: [0, 11],
},
{
aliases: 'mon tue wen thu fri sat sun'.split(' '),
aliases: 'sun mon tue wen thu fri sat'.split(' '),
name: 'dayOfWeek',
post: value => (value === 0 ? 7 : value),
range: [1, 7],
post: value => (value === 7 ? 0 : value),
range: [0, 6],
},
],
presets: {

View File

@@ -8,29 +8,27 @@ describe('parse()', () => {
minute: [0],
hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dayOfMonth: [1, 11, 21, 31],
month: [1, 3, 5, 8, 11],
month: [0, 2, 4, 7, 10],
})
})
it('correctly parse months', () => {
expect(parse('* * * 0,11 *')).toEqual({
month: [1, 12],
month: [0, 11],
})
expect(parse('* * * jan,dec *')).toEqual({
month: [1, 12],
month: [0, 11],
})
})
it('correctly parse days', () => {
expect(parse('* * * * mon,sun')).toEqual({
dayOfWeek: [1, 7],
dayOfWeek: [0, 1],
})
})
it('reports missing integer', () => {
expect(() => parse('*/a')).toThrow(
'minute: missing integer at character 2'
)
expect(() => parse('*/a')).toThrow('minute: missing integer at character 2')
expect(() => parse('*')).toThrow('hour: missing integer at character 1')
})
@@ -42,10 +40,10 @@ describe('parse()', () => {
it('dayOfWeek: 0 and 7 bind to sunday', () => {
expect(parse('* * * * 0')).toEqual({
dayOfWeek: [7],
dayOfWeek: [0],
})
expect(parse('* * * * 7')).toEqual({
dayOfWeek: [7],
dayOfWeek: [0],
})
})
})

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -0,0 +1,54 @@
{
"name": "@xen-orchestra/fs",
"version": "0.0.1",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.49",
"@marsaud/smb2-promise": "^0.2.1",
"execa": "^0.10.0",
"fs-extra": "^6.0.1",
"get-stream": "^3.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.9.5",
"through2": "^2.0.3",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}

View File

@@ -0,0 +1,291 @@
// @flow
import getStream from 'get-stream'
import { randomBytes } from 'crypto'
import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
import { type Readable, type Writable } from 'stream'
import { parse } from 'xo-remote-parser'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
type Data = Buffer | Readable | string
type FileDescriptor = {| fd: mixed, path: string |}
type LaxReadable = Readable & Object
type LaxWritable = Writable & Object
type File = FileDescriptor | string
const checksumFile = file => file + '.checksum'
export default class RemoteHandlerAbstract {
_remote: Object
constructor (remote: any) {
this._remote = { ...remote, ...parse(remote.url) }
if (this._remote.type !== this.type) {
throw new Error('Incorrect remote type')
}
}
get type (): string {
throw new Error('Not implemented')
}
/**
* Asks the handler to sync the state of the effective remote with its' metadata
*/
async sync (): Promise<mixed> {
return this._sync()
}
async _sync (): Promise<mixed> {
throw new Error('Not implemented')
}
/**
* Free the resources possibly dedicated to put the remote at work, when it is no more needed
*/
async forget (): Promise<void> {
await this._forget()
}
async _forget (): Promise<void> {
throw new Error('Not implemented')
}
async test (): Promise<Object> {
const testFileName = `${Date.now()}.test`
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
let step = 'write'
try {
await this.outputFile(testFileName, data)
step = 'read'
const read = await this.readFile(testFileName)
if (data.compare(read) !== 0) {
throw new Error('output and input did not match')
}
return {
success: true,
}
} catch (error) {
return {
success: false,
step,
file: testFileName,
error: error.message || String(error),
}
} finally {
ignoreErrors.call(this.unlink(testFileName))
}
}
async outputFile (file: string, data: Data, options?: Object): Promise<void> {
return this._outputFile(file, data, {
flags: 'wx',
...options,
})
}
async _outputFile (file: string, data: Data, options?: Object): Promise<void> {
const stream = await this.createOutputStream(file, options)
const promise = fromEvent(stream, 'finish')
stream.end(data)
await promise
}
async readFile (file: string, options?: Object): Promise<Buffer> {
return this._readFile(file, options)
}
_readFile (file: string, options?: Object): Promise<Buffer> {
return this.createReadStream(file, options).then(getStream.buffer)
}
async rename (
oldPath: string,
newPath: string,
{ checksum = false }: Object = {}
) {
let p = this._rename(oldPath, newPath)
if (checksum) {
p = Promise.all([
p,
this._rename(checksumFile(oldPath), checksumFile(newPath)),
])
}
return p
}
async _rename (oldPath: string, newPath: string) {
throw new Error('Not implemented')
}
async list (
dir: string = '.',
{
filter,
prependDir = false,
}: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
): Promise<string[]> {
let entries = await this._list(dir)
if (filter !== undefined) {
entries = entries.filter(filter)
}
if (prependDir) {
entries.forEach((entry, i) => {
entries[i] = dir + '/' + entry
})
}
return entries
}
async _list (dir: string): Promise<string[]> {
throw new Error('Not implemented')
}
createReadStream (
file: string,
{ checksum = false, ignoreMissingChecksum = false, ...options }: Object = {}
): Promise<LaxReadable> {
const path = typeof file === 'string' ? file : file.path
const streamP = this._createReadStream(file, options).then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (
stream.length === undefined &&
options.end === undefined &&
options.start === undefined
) {
promise = Promise.all([
promise,
ignoreErrors.call(
this.getSize(file).then(size => {
stream.length = size
})
),
])
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid a unhandled rejection warning
ignoreErrors.call(streamP)
return this.readFile(checksumFile(path)).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = (validChecksumOfReadStream(
stream,
String(checksum).trim()
): LaxReadable)
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
async _createReadStream (
file: string,
options?: Object
): Promise<LaxReadable> {
throw new Error('Not implemented')
}
async openFile (path: string, flags?: string): Promise<FileDescriptor> {
return { fd: await this._openFile(path, flags), path }
}
async _openFile (path: string, flags?: string): Promise<mixed> {
throw new Error('Not implemented')
}
async closeFile (fd: FileDescriptor): Promise<void> {
await this._closeFile(fd.fd)
}
async _closeFile (fd: mixed): Promise<void> {
throw new Error('Not implemented')
}
async refreshChecksum (path: string): Promise<void> {
const stream = (await this.createReadStream(path)).pipe(
createChecksumStream()
)
stream.resume() // start reading the whole file
await this.outputFile(checksumFile(path), await stream.checksum)
}
async createOutputStream (
file: File,
{ checksum = false, ...options }: Object = {}
): Promise<LaxWritable> {
const path = typeof file === 'string' ? file : file.path
const streamP = this._createOutputStream(file, {
flags: 'wx',
...options,
})
if (!checksum) {
return streamP
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
// $FlowFixMe
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this.outputFile(checksumFile(path), value))
.catch(forwardError)
return checksumStream
}
// Backend hook for `createOutputStream`: concrete handlers must return a
// writable stream for `file` honoring `options` (flags, etc.).
async _createOutputStream (
  file: mixed,
  options?: Object
): Promise<LaxWritable> {
  throw new Error('Not implemented')
}
async unlink (file: string, { checksum = true }: Object = {}): Promise<void> {
if (checksum) {
ignoreErrors.call(this._unlink(checksumFile(file)))
}
await this._unlink(file)
}
// Backend hook for `unlink`: subclasses delete the given file.
async _unlink (file: mixed): Promise<void> {
  throw new Error('Not implemented')
}
async getSize (file: mixed): Promise<number> {
return this._getSize(file)
}
// Backend hook for `getSize`: subclasses return the file size in bytes.
async _getSize (file: mixed): Promise<number> {
  throw new Error('Not implemented')
}
}

View File

@@ -0,0 +1,100 @@
// @flow
// $FlowFixMe
import through2 from 'through2'
import { createHash } from 'crypto'
import { defer, fromEvent } from 'promise-toolbox'
import { invert } from 'lodash'
import { type Readable, type Transform } from 'stream'
// Format: $<algorithm>$<salt>$<encrypted>
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
// hash algorithm name → crypt(3)-style numeric id (see header comment above)
const ALGORITHM_TO_ID = {
  md5: '1',
  sha256: '5',
  sha512: '6',
}
// reverse mapping: id → algorithm name, used when parsing a checksum string
const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)
// Create a through stream which computes the checksum of all data going
// through.
//
// The `checksum` attribute is a promise which resolves at the end of the stream
// with a string representation of the checksum.
//
// const source = ...
// const checksumStream = source.pipe(createChecksumStream())
// checksumStream.resume() // make the data flow without an output
// console.log(await checksumStream.checksum)
export const createChecksumStream = (
algorithm: string = 'md5'
): Transform & { checksum: Promise<string> } => {
const algorithmId = ALGORITHM_TO_ID[algorithm]
if (!algorithmId) {
throw new Error(`unknown algorithm: ${algorithm}`)
}
const hash = createHash(algorithm)
const { promise, resolve, reject } = defer()
const stream = through2(
(chunk, enc, callback) => {
hash.update(chunk)
callback(null, chunk)
},
callback => {
resolve(`$${algorithmId}$$${hash.digest('hex')}`)
callback()
}
).once('error', reject)
stream.checksum = promise
return stream
}
// Check whether the checksum of a readable stream equals an expected checksum.
// The given stream is wrapped in a stream which emits an error event
// if the computed checksum does not equal the expected checksum.
export const validChecksumOfReadStream = (
stream: Readable,
expectedChecksum: string
): Readable & { checksumVerified: Promise<void> } => {
const algorithmId = expectedChecksum.slice(
1,
expectedChecksum.indexOf('$', 1)
)
if (!algorithmId) {
throw new Error(`unknown algorithm: ${algorithmId}`)
}
const hash = createHash(ID_TO_ALGORITHM[algorithmId])
const wrapper: any = stream.pipe(
through2(
{ highWaterMark: 0 },
(chunk, enc, callback) => {
hash.update(chunk)
callback(null, chunk)
},
callback => {
const checksum = `$${algorithmId}$$${hash.digest('hex')}`
callback(
checksum !== expectedChecksum
? new Error(
`Bad checksum (${checksum}), expected: ${expectedChecksum}`
)
: null
)
}
)
)
stream.on('error', error => wrapper.emit('error', error))
wrapper.checksumVerified = fromEvent(wrapper, 'end')
return wrapper
}

View File

@@ -0,0 +1,26 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import { getHandler } from '.'
// remember where we started so afterEach can chdir back before cleanup
const initialDir = process.cwd()
// each test runs inside its own fresh temporary directory
beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})
afterEach(async () => {
  // leave the temporary directory before deleting it
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})
// smoke test: the local handler should report success on a writable dir
test("fs test doesn't crash", async () => {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const result = await handler.test()
  expect(result.success).toBeTruthy()
})

View File

@@ -0,0 +1,26 @@
// @flow
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'
export type { default as RemoteHandler } from './abstract'
export type Remote = { url: string }
const HANDLERS = {
file: RemoteHandlerLocal,
smb: RemoteHandlerSmb,
nfs: RemoteHandlerNfs,
}
export const getHandler = (remote: Remote): RemoteHandler => {
// FIXME: should be done in xo-remote-parser.
const type = remote.url.split('://')[0]
const Handler = HANDLERS[type]
if (!Handler) {
throw new Error('Unhandled remote type')
}
return new Handler(remote)
}

View File

@@ -63,13 +63,29 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _createReadStream (file, options) {
return fs.createReadStream(this._getFilePath(file), options)
if (typeof file === 'string') {
return fs.createReadStream(this._getFilePath(file), options)
} else {
return fs.createReadStream('', {
autoClose: false,
...options,
fd: file.fd,
})
}
}
async _createOutputStream (file, options) {
const path = this._getFilePath(file)
await fs.ensureDir(dirname(path))
return fs.createWriteStream(path, options)
if (typeof file === 'string') {
const path = this._getFilePath(file)
await fs.ensureDir(dirname(path))
return fs.createWriteStream(path, options)
} else {
return fs.createWriteStream('', {
autoClose: false,
...options,
fd: file.fd,
})
}
}
async _unlink (file) {
@@ -82,7 +98,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize (file) {
const stats = await fs.stat(this._getFilePath(file))
const stats = await fs.stat(
this._getFilePath(typeof file === 'string' ? file : file.path)
)
return stats.size
}
async _openFile (path, flags) {
return fs.open(this._getFilePath(path), flags)
}
// Close a numeric descriptor previously returned by _openFile.
async _closeFile (fd) {
  return fs.close(fd)
}
}

View File

@@ -17,7 +17,14 @@ export default class NfsHandler extends LocalHandler {
let stdout
const mounted = {}
try {
stdout = await execa.stdout('findmnt', ['-P', '-t', 'nfs,nfs4', '--output', 'SOURCE,TARGET', '--noheadings'])
stdout = await execa.stdout('findmnt', [
'-P',
'-t',
'nfs,nfs4',
'--output',
'SOURCE,TARGET',
'--noheadings',
])
const regex = /^SOURCE="([^:]*):(.*)" TARGET="(.*)"$/
forEach(stdout.split('\n'), m => {
if (m) {
@@ -45,7 +52,14 @@ export default class NfsHandler extends LocalHandler {
async _mount () {
await fs.ensureDir(this._getRealPath())
return execa('mount', ['-t', 'nfs', '-o', 'vers=3', `${this._remote.host}:${this._remote.path}`, this._getRealPath()])
return execa('mount', [
'-t',
'nfs',
'-o',
'vers=3',
`${this._remote.host}:${this._remote.path}`,
this._getRealPath(),
])
}
async _sync () {

View File

@@ -1,19 +1,16 @@
import Smb2 from '@marsaud/smb2-promise'
import { lastly as pFinally } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
import {
noop,
pFinally,
} from '../utils'
const noop = () => {}
// Normalize the error code for file not found.
const normalizeError = error => {
const { code } = error
return (
code === 'STATUS_OBJECT_NAME_NOT_FOUND' ||
return code === 'STATUS_OBJECT_NAME_NOT_FOUND' ||
code === 'STATUS_OBJECT_PATH_NOT_FOUND'
)
? Object.create(error, {
code: {
configurable: true,
@@ -50,9 +47,7 @@ export default class SmbHandler extends RemoteHandlerAbstract {
file = undefined
}
let path = (this._remote.path !== '')
? this._remote.path
: ''
let path = this._remote.path !== '' ? this._remote.path : ''
// Ensure remote path is a directory.
if (path !== '' && path[path.length - 1] !== '\\') {
@@ -94,7 +89,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
await client.ensureDir(dir)
}
return client.writeFile(path, data, options)::pFinally(() => { client.close() })
return client.writeFile(path, data, options)::pFinally(() => {
client.close()
})
}
async _readFile (file, options = {}) {
@@ -102,7 +99,11 @@ export default class SmbHandler extends RemoteHandlerAbstract {
let content
try {
content = await client.readFile(this._getFilePath(file), options)::pFinally(() => { client.close() })
content = await client
.readFile(this._getFilePath(file), options)
::pFinally(() => {
client.close()
})
} catch (error) {
throw normalizeError(error)
}
@@ -114,7 +115,11 @@ export default class SmbHandler extends RemoteHandlerAbstract {
const client = this._getClient(this._remote)
try {
await client.rename(this._getFilePath(oldPath), this._getFilePath(newPath))::pFinally(() => { client.close() })
await client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
::pFinally(() => {
client.close()
})
} catch (error) {
throw normalizeError(error)
}
@@ -125,7 +130,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
let list
try {
list = await client.readdir(this._getFilePath(dir))::pFinally(() => { client.close() })
list = await client.readdir(this._getFilePath(dir))::pFinally(() => {
client.close()
})
} catch (error) {
throw normalizeError(error)
}
@@ -134,6 +141,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}
async _createReadStream (file, options = {}) {
if (typeof file !== 'string') {
file = file.path
}
const client = this._getClient(this._remote)
let stream
@@ -149,6 +159,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
}
async _createOutputStream (file, options = {}) {
if (typeof file !== 'string') {
file = file.path
}
const client = this._getClient(this._remote)
const path = this._getFilePath(file)
const dir = this._dirname(path)
@@ -170,7 +183,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
const client = this._getClient(this._remote)
try {
await client.unlink(this._getFilePath(file))::pFinally(() => { client.close() })
await client.unlink(this._getFilePath(file))::pFinally(() => {
client.close()
})
} catch (error) {
throw normalizeError(error)
}
@@ -181,11 +196,22 @@ export default class SmbHandler extends RemoteHandlerAbstract {
let size
try {
size = await client.getSize(this._getFilePath(file))::pFinally(() => { client.close() })
size = await client
.getSize(this._getFilePath(typeof file === 'string' ? file : file.path))
::pFinally(() => {
client.close()
})
} catch (error) {
throw normalizeError(error)
}
return size
}
// this is a fake
// SMB has no persistent descriptor here: "opening" merely resolves the
// full remote path, which later calls accept in place of a real fd.
async _openFile (path) {
  return this._getFilePath(path)
}
// no-op: the fake "descriptor" from _openFile holds no resource to release
async _closeFile (fd) {}
}

View File

@@ -1,121 +1,286 @@
# ChangeLog
## **5.20.0** (planned 2018-05-31)
### Enhancements
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
### Bugs
## **5.19.0** (2018-05-01)
### Enhancements
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
### Bugs
- Allowed-ips don't work, displaying "index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function" [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
## **5.18.0** (2018-03-31)
### Enhancements
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
- UI Enhancement - VM list - Always show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
### Bugs
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
- [Basic backup] Continuous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
## **5.17.0** (2018-03-02)
### Enhancements
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
- Basic backup: snapshots names [#2668](https://github.com/vatesfr/xen-orchestra/issues/2668)
- Change placement of "share" button for self [#2663](https://github.com/vatesfr/xen-orchestra/issues/2663)
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Backup report for VDI chain status [#2639](https://github.com/vatesfr/xen-orchestra/issues/2639)
- [Dashboard/Health] Control domain VDIs should include snapshots [#2634](https://github.com/vatesfr/xen-orchestra/issues/2634)
- Do not count VM-snapshot in self quota [#2626](https://github.com/vatesfr/xen-orchestra/issues/2626)
- [xo-web] Backup logs [#2618](https://github.com/vatesfr/xen-orchestra/issues/2618)
- [VM/Snapshots] grouped deletion [#2595](https://github.com/vatesfr/xen-orchestra/issues/2595)
- [Backups] add a new state for a VM: skipped [#2591](https://github.com/vatesfr/xen-orchestra/issues/2591)
- Set a self-service VM at "share" after creation [#2589](https://github.com/vatesfr/xen-orchestra/issues/2589)
- [Backup logs] Improve Unhealthy VDI Chain message [#2586](https://github.com/vatesfr/xen-orchestra/issues/2586)
- [SortedTable] Put sort criteria in URL like the filter [#2584](https://github.com/vatesfr/xen-orchestra/issues/2584)
- Can't attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
### Bugs
- Limit VDI export concurrency [#2672](https://github.com/vatesfr/xen-orchestra/issues/2672)
- Select is broken outside dev mode [#2645](https://github.com/vatesfr/xen-orchestra/issues/2645)
- "New" XOSAN automatically register the user [#2625](https://github.com/vatesfr/xen-orchestra/issues/2625)
- [VM/Advanced] Error on resource set change should not be hidden [#2620](https://github.com/vatesfr/xen-orchestra/issues/2620)
- misspelled word [#2606](https://github.com/vatesfr/xen-orchestra/issues/2606)
- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
## **5.16.0** (2018-01-31)
### Enhancements
- Use @xen-orchestra/cron everywhere [#2616](https://github.com/vatesfr/xen-orchestra/issues/2616)
- [SortedTable] Possibility to specify grouped/individual actions together [#2596](https://github.com/vatesfr/xen-orchestra/issues/2596)
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
- Two factor auth [#1897](https://github.com/vatesfr/xen-orchestra/issues/1897)
- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
- Self Service User - User don't have quota in his dashboard [#1538](https://github.com/vatesfr/xen-orchestra/issues/1538)
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
### Bugs
- [cron] toJSDate is not a function [#2661](https://github.com/vatesfr/xen-orchestra/issues/2661)
- [Delta backup] Merge should not fail when delta contains no data [#2635](https://github.com/vatesfr/xen-orchestra/issues/2635)
- Select issues [#2590](https://github.com/vatesfr/xen-orchestra/issues/2590)
- Fix selects display [#2575](https://github.com/vatesfr/xen-orchestra/issues/2575)
- [SortedTable] Stuck when displaying last page [#2569](https://github.com/vatesfr/xen-orchestra/issues/2569)
- [vm/network] Duplicate key error [#2553](https://github.com/vatesfr/xen-orchestra/issues/2553)
- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
- TZ selector is not used for backup schedule preview [#2464](https://github.com/vatesfr/xen-orchestra/issues/2464)
- Remove filter in VM/network view [#2548](https://github.com/vatesfr/xen-orchestra/issues/2548)
## **5.15.0** (2017-12-29)
### Enhancements
* VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
* Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
* New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
* Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
* [Self Service] Do not use placeholders to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
* Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
* Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
* Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
* Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
* [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
* Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
- Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
- New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
- Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
- [Self Service] Do not use placeholders to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
- Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
- Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
- Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
### Bugs
* VM console doesn't work when using IPv6 in URL [#2530](https://github.com/vatesfr/xen-orchestra/issues/2530)
* Retention issue with failed basic backup [#2524](https://github.com/vatesfr/xen-orchestra/issues/2524)
* [VM/Advanced] Check that the autopower on setting is working [#2489](https://github.com/vatesfr/xen-orchestra/issues/2489)
* Cloud config drive create fail on XenServer < 7 [#2478](https://github.com/vatesfr/xen-orchestra/issues/2478)
* VM create fails due to missing vGPU id [#2466](https://github.com/vatesfr/xen-orchestra/issues/2466)
- VM console doesn't work when using IPv6 in URL [#2530](https://github.com/vatesfr/xen-orchestra/issues/2530)
- Retention issue with failed basic backup [#2524](https://github.com/vatesfr/xen-orchestra/issues/2524)
- [VM/Advanced] Check that the autopower on setting is working [#2489](https://github.com/vatesfr/xen-orchestra/issues/2489)
- Cloud config drive create fail on XenServer < 7 [#2478](https://github.com/vatesfr/xen-orchestra/issues/2478)
- VM create fails due to missing vGPU id [#2466](https://github.com/vatesfr/xen-orchestra/issues/2466)
## **5.14.0** (2017-10-31)
### Enhancements
* VM snapshot description display [#2458](https://github.com/vatesfr/xen-orchestra/issues/2458)
* [Home] Ability to sort VM by number of snapshots [#2450](https://github.com/vatesfr/xen-orchestra/issues/2450)
* Display XS version in host view [#2439](https://github.com/vatesfr/xen-orchestra/issues/2439)
* [File restore]: Clarify the possibility to select multiple files [#2438](https://github.com/vatesfr/xen-orchestra/issues/2438)
* [Continuous Replication] Time in replicated VMs [#2431](https://github.com/vatesfr/xen-orchestra/issues/2431)
* [SortedTable] Active page in URL param [#2405](https://github.com/vatesfr/xen-orchestra/issues/2405)
* replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
* [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
* Handle patching licenses [#2382](https://github.com/vatesfr/xen-orchestra/issues/2382)
* Credential leaking in logs for messages regarding invalid credentials and "too fast authentication" [#2363](https://github.com/vatesfr/xen-orchestra/issues/2363)
* [SortedTable] Keyboard support [#2330](https://github.com/vatesfr/xen-orchestra/issues/2330)
* token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
* On updater error, display link to documentation [#1610](https://github.com/vatesfr/xen-orchestra/issues/1610)
* Add basic vGPU support [#2413](https://github.com/vatesfr/xen-orchestra/issues/2413)
* Storage View - Disk Tab - real disk usage [#2475](https://github.com/vatesfr/xen-orchestra/issues/2475)
- VM snapshot description display [#2458](https://github.com/vatesfr/xen-orchestra/issues/2458)
- [Home] Ability to sort VM by number of snapshots [#2450](https://github.com/vatesfr/xen-orchestra/issues/2450)
- Display XS version in host view [#2439](https://github.com/vatesfr/xen-orchestra/issues/2439)
- [File restore]: Clarify the possibility to select multiple files [#2438](https://github.com/vatesfr/xen-orchestra/issues/2438)
- [Continuous Replication] Time in replicated VMs [#2431](https://github.com/vatesfr/xen-orchestra/issues/2431)
- [SortedTable] Active page in URL param [#2405](https://github.com/vatesfr/xen-orchestra/issues/2405)
- replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
- [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
- Handle patching licenses [#2382](https://github.com/vatesfr/xen-orchestra/issues/2382)
- Credential leaking in logs for messages regarding invalid credentials and "too fast authentication" [#2363](https://github.com/vatesfr/xen-orchestra/issues/2363)
- [SortedTable] Keyboard support [#2330](https://github.com/vatesfr/xen-orchestra/issues/2330)
- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
- On updater error, display link to documentation [#1610](https://github.com/vatesfr/xen-orchestra/issues/1610)
- Add basic vGPU support [#2413](https://github.com/vatesfr/xen-orchestra/issues/2413)
- Storage View - Disk Tab - real disk usage [#2475](https://github.com/vatesfr/xen-orchestra/issues/2475)
### Bugs
* Config drive - Custom config not working properly [#2449](https://github.com/vatesfr/xen-orchestra/issues/2449)
* Snapshot sorted table breaks copyVm [#2446](https://github.com/vatesfr/xen-orchestra/issues/2446)
* [vm/snapshots] Incorrect default sort order [#2442](https://github.com/vatesfr/xen-orchestra/issues/2442)
* [Backups/Jobs] Incorrect months mapping [#2427](https://github.com/vatesfr/xen-orchestra/issues/2427)
* [Xapi#barrier()] Not compatible with XenServer < 6.1 [#2418](https://github.com/vatesfr/xen-orchestra/issues/2418)
* [SortedTable] Change page when no more items on the page [#2401](https://github.com/vatesfr/xen-orchestra/issues/2401)
* Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
* Unable to edit / save restored backup job [#1922](https://github.com/vatesfr/xen-orchestra/issues/1922)
- Config drive - Custom config not working properly [#2449](https://github.com/vatesfr/xen-orchestra/issues/2449)
- Snapshot sorted table breaks copyVm [#2446](https://github.com/vatesfr/xen-orchestra/issues/2446)
- [vm/snapshots] Incorrect default sort order [#2442](https://github.com/vatesfr/xen-orchestra/issues/2442)
- [Backups/Jobs] Incorrect months mapping [#2427](https://github.com/vatesfr/xen-orchestra/issues/2427)
- [Xapi#barrier()] Not compatible with XenServer < 6.1 [#2418](https://github.com/vatesfr/xen-orchestra/issues/2418)
- [SortedTable] Change page when no more items on the page [#2401](https://github.com/vatesfr/xen-orchestra/issues/2401)
- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
- Unable to edit / save restored backup job [#1922](https://github.com/vatesfr/xen-orchestra/issues/1922)
## **5.13.0** (2017-09-29)
### Enhancements
* replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
* [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
* Auto select iqn or lun if there is only one [#2379](https://github.com/vatesfr/xen-orchestra/issues/2379)
* [Sparklines] Hide points [#2370](https://github.com/vatesfr/xen-orchestra/issues/2370)
* Allow xo-server-recover-account to generate a random password [#2360](https://github.com/vatesfr/xen-orchestra/issues/2360)
* Add disk in existing VM as self user [#2348](https://github.com/vatesfr/xen-orchestra/issues/2348)
* Sorted table for Settings/server [#2340](https://github.com/vatesfr/xen-orchestra/issues/2340)
* Sign in should be case insensitive [#2337](https://github.com/vatesfr/xen-orchestra/issues/2337)
* [SortedTable] Extend checkbox click to whole column [#2329](https://github.com/vatesfr/xen-orchestra/issues/2329)
* [SortedTable] Ability to select all items (across pages) [#2324](https://github.com/vatesfr/xen-orchestra/issues/2324)
* [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
* Warning on SMB remote creation [#2316](https://github.com/vatesfr/xen-orchestra/issues/2316)
* [Home | SortedTable] Add link to syntax doc in the filter input [#2305](https://github.com/vatesfr/xen-orchestra/issues/2305)
* [SortedTable] Add optional binding of filter to an URL query [#2301](https://github.com/vatesfr/xen-orchestra/issues/2301)
* [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
* SR view / Disks: option to display non managed VDIs [#1724](https://github.com/vatesfr/xen-orchestra/issues/1724)
* Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
- replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
- [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
- Auto select iqn or lun if there is only one [#2379](https://github.com/vatesfr/xen-orchestra/issues/2379)
- [Sparklines] Hide points [#2370](https://github.com/vatesfr/xen-orchestra/issues/2370)
- Allow xo-server-recover-account to generate a random password [#2360](https://github.com/vatesfr/xen-orchestra/issues/2360)
- Add disk in existing VM as self user [#2348](https://github.com/vatesfr/xen-orchestra/issues/2348)
- Sorted table for Settings/server [#2340](https://github.com/vatesfr/xen-orchestra/issues/2340)
- Sign in should be case insensitive [#2337](https://github.com/vatesfr/xen-orchestra/issues/2337)
- [SortedTable] Extend checkbox click to whole column [#2329](https://github.com/vatesfr/xen-orchestra/issues/2329)
- [SortedTable] Ability to select all items (across pages) [#2324](https://github.com/vatesfr/xen-orchestra/issues/2324)
- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
- Warning on SMB remote creation [#2316](https://github.com/vatesfr/xen-orchestra/issues/2316)
- [Home | SortedTable] Add link to syntax doc in the filter input [#2305](https://github.com/vatesfr/xen-orchestra/issues/2305)
- [SortedTable] Add optional binding of filter to an URL query [#2301](https://github.com/vatesfr/xen-orchestra/issues/2301)
- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
- SR view / Disks: option to display non managed VDIs [#1724](https://github.com/vatesfr/xen-orchestra/issues/1724)
- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
### Bugs
* iSCSI issue on LUN selector [#2374](https://github.com/vatesfr/xen-orchestra/issues/2374)
* Errors in VM copy are not properly reported [#2347](https://github.com/vatesfr/xen-orchestra/issues/2347)
* Removing a PIF IP fails [#2346](https://github.com/vatesfr/xen-orchestra/issues/2346)
* Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
* iSCSI LUN Detection fails with authentication [#2339](https://github.com/vatesfr/xen-orchestra/issues/2339)
* Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
* [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
* A job shouldn't be executable more than once at the same time [#2053](https://github.com/vatesfr/xen-orchestra/issues/2053)
- iSCSI issue on LUN selector [#2374](https://github.com/vatesfr/xen-orchestra/issues/2374)
- Errors in VM copy are not properly reported [#2347](https://github.com/vatesfr/xen-orchestra/issues/2347)
- Removing a PIF IP fails [#2346](https://github.com/vatesfr/xen-orchestra/issues/2346)
- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
- iSCSI LUN Detection fails with authentication [#2339](https://github.com/vatesfr/xen-orchestra/issues/2339)
- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
- A job shouldn't be executable more than once at the same time [#2053](https://github.com/vatesfr/xen-orchestra/issues/2053)
## **5.12.0** (2017-08-31)
### Enhancements
* PIF selector with physical status [#2326](https://github.com/vatesfr/xen-orchestra/issues/2326)
* [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
* Self service filter for home/VM view [#2303](https://github.com/vatesfr/xen-orchestra/issues/2303)
* SR/Disks Display total of VDIs to coalesce [#2300](https://github.com/vatesfr/xen-orchestra/issues/2300)
* Pool filter in the task view [#2293](https://github.com/vatesfr/xen-orchestra/issues/2293)
* "Loading" while fetching objects [#2285](https://github.com/vatesfr/xen-orchestra/issues/2285)
* [SortedTable] Add grouped actions feature [#2276](https://github.com/vatesfr/xen-orchestra/issues/2276)
* Add a filter to the backups' log [#2246](https://github.com/vatesfr/xen-orchestra/issues/2246)
* It should not be possible to migrate a halted VM. [#2233](https://github.com/vatesfr/xen-orchestra/issues/2233)
* [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
* Allow to set pool master [#2213](https://github.com/vatesfr/xen-orchestra/issues/2213)
* Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
- PIF selector with physical status [#2326](https://github.com/vatesfr/xen-orchestra/issues/2326)
- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
- Self service filter for home/VM view [#2303](https://github.com/vatesfr/xen-orchestra/issues/2303)
- SR/Disks Display total of VDIs to coalesce [#2300](https://github.com/vatesfr/xen-orchestra/issues/2300)
- Pool filter in the task view [#2293](https://github.com/vatesfr/xen-orchestra/issues/2293)
- "Loading" while fetching objects [#2285](https://github.com/vatesfr/xen-orchestra/issues/2285)
- [SortedTable] Add grouped actions feature [#2276](https://github.com/vatesfr/xen-orchestra/issues/2276)
- Add a filter to the backups' log [#2246](https://github.com/vatesfr/xen-orchestra/issues/2246)
- It should not be possible to migrate a halted VM. [#2233](https://github.com/vatesfr/xen-orchestra/issues/2233)
- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
- Allow to set pool master [#2213](https://github.com/vatesfr/xen-orchestra/issues/2213)
- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
### Bugs
* Home pagination bug [#2310](https://github.com/vatesfr/xen-orchestra/issues/2310)
* Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
* VM snapshots are not correctly deleted [#2304](https://github.com/vatesfr/xen-orchestra/issues/2304)
* Parallel deletion of VMs fails [#2297](https://github.com/vatesfr/xen-orchestra/issues/2297)
* Continuous replication creates multiple zombie disks [#2292](https://github.com/vatesfr/xen-orchestra/issues/2292)
* Add user to Group issue [#2196](https://github.com/vatesfr/xen-orchestra/issues/2196)
* [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
- Home pagination bug [#2310](https://github.com/vatesfr/xen-orchestra/issues/2310)
- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
- VM snapshots are not correctly deleted [#2304](https://github.com/vatesfr/xen-orchestra/issues/2304)
- Parallel deletion of VMs fails [#2297](https://github.com/vatesfr/xen-orchestra/issues/2297)
- Continuous replication creates multiple zombie disks [#2292](https://github.com/vatesfr/xen-orchestra/issues/2292)
- Add user to Group issue [#2196](https://github.com/vatesfr/xen-orchestra/issues/2196)
- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
## **5.11.0** (2017-07-31)

46
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at julien.fontanet@vates.fr. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

16
PULL_REQUEST_TEMPLATE.md Normal file
View File

@@ -0,0 +1,16 @@
### Check list
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] CHANGELOG updated
- [ ] documentation updated
### Process
1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer
1. if necessary, update your PR, and re-add a reviewer
### List of packages to release
> No need to mention xo-server and xo-web.

5
babel.config.js Normal file
View File

@@ -0,0 +1,5 @@
// Root Babel configuration.
//
// Jest must be able to resolve the `.babelrc.js` closest to each file
// under test, rather than only the one living in this directory.
const config = {
  babelrcRoots: true,
}

module.exports = config

View File

@@ -0,0 +1,6 @@
// Minimal Flow library definition for `limit-concurrency-decorator`:
// calling the default export with a concurrency limit returns a
// decorator which wraps a function without changing its type.
declare module 'limit-concurrency-decorator' {
declare function limitConcurrencyDecorator(
concurrency: number
): <T: Function>(T) => T
declare export default typeof limitConcurrencyDecorator
}

29
flow-typed/lodash.js vendored Normal file
View File

@@ -0,0 +1,29 @@
declare module 'lodash' {
declare export function forEach<K, V>(
object: { [K]: V },
iteratee: (V, K) => void
): void
declare export function groupBy<K, V>(
object: { [K]: V },
iteratee: K | ((V, K) => string)
): { [string]: V[] }
declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
declare export function isEmpty(mixed): boolean
declare export function keyBy<T>(array: T[], iteratee: string): boolean
declare export function last<T>(array?: T[]): T | void
declare export function map<T1, T2>(
collection: T1[],
iteratee: (T1) => T2
): T2[]
declare export function mapValues<K, V1, V2>(
object: { [K]: V1 },
iteratee: (V1, K) => V2
): { [K]: V2 }
declare export function noop(...args: mixed[]): void
declare export function some<T>(
collection: T[],
iteratee: (T, number) => boolean
): boolean
declare export function sum(values: number[]): number
declare export function values<K, V>(object: { [K]: V }): V[]
}

14
flow-typed/promise-toolbox.js vendored Normal file
View File

@@ -0,0 +1,14 @@
declare module 'promise-toolbox' {
declare export function cancelable(Function): Function
declare export function defer<T>(): {|
promise: Promise<T>,
reject: T => void,
resolve: T => void,
|}
declare export function fromCallback<T>(
(cb: (error: any, value: T) => void) => void
): Promise<T>
declare export function fromEvent(emitter: mixed, string): Promise<mixed>
declare export function ignoreErrors(): Promise<void>
declare export function timeout<T>(delay: number): Promise<T>
}

2
flow-typed/xo.js vendored Normal file
View File

@@ -0,0 +1,2 @@
// `$Dict<T, K>`: shorthand for a plain-object dictionary mapping keys
// of type K (string by default) to values of type T.
// eslint-disable-next-line no-undef
declare type $Dict<T, K = string> = { [K]: T }

View File

@@ -1,45 +1,44 @@
{
"devDependencies": {
"@babel/register": "^7.0.0-beta.39",
"babel-7-jest": "^21.3.2",
"@babel/core": "^7.0.0-beta.49",
"@babel/register": "^7.0.0-beta.49",
"babel-core": "^7.0.0-0",
"babel-eslint": "^8.1.2",
"babel-jest": "^23.0.1",
"benchmark": "^2.1.4",
"eslint": "^4.14.0",
"eslint-config-standard": "^11.0.0-beta.0",
"eslint-config-standard-jsx": "^4.0.2",
"eslint-config-standard-jsx": "^5.0.0",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^6.0.0",
"eslint-plugin-promise": "^3.6.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^3.0.1",
"exec-promise": "^0.7.0",
"flow-bin": "^0.65.0",
"globby": "^7.1.1",
"flow-bin": "^0.73.0",
"globby": "^8.0.0",
"husky": "^0.14.3",
"jest": "^22.0.4",
"jest": "^23.0.1",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.9.5",
"sorted-object": "^2.0.1"
},
"engines": {
"yarn": "^1.2.1"
"yarn": "^1.7.0"
},
"jest": {
"collectCoverage": true,
"projects": ["<rootDir>", "<rootDir>/packages/xo-web"],
"projects": [
"<rootDir>"
],
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/dist/",
"/xo-vmdk-to-vhd/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"transform": {
"/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
"/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
"\\.jsx?$": "babel-jest"
}
},
@@ -47,12 +46,14 @@
"scripts": {
"build": "scripts/run-script --parallel build",
"clean": "scripts/run-script --parallel clean",
"dev-test": "jest --bail --watch",
"dev": "scripts/run-script --parallel dev",
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
"posttest": "scripts/run-script test",
"precommit": "scripts/lint-staged",
"prepare": "scripts/run-script prepare",
"pretest": "eslint --ignore-path .gitignore .",
"test": "jest && flow status"
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
"test-integration": "jest \".integ\\.spec\\.js$\""
},
"workspaces": [
"@xen-orchestra/*",

View File

@@ -1,28 +1,3 @@
const dependencies = require('./package').dependencies || {}
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
module.exports = {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: ['lodash'],
presets: [
[
'@babel/env',
{
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? {
browsers: '>2%',
node: '4',
}
: { node: 'current' },
useBuiltIns: '@babel/polyfill' in dependencies && 'usage',
},
],
],
}
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,6 +1,6 @@
{
"name": "complex-matcher",
"version": "0.2.1",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -20,6 +20,9 @@
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=4"
},
@@ -27,9 +30,9 @@
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.39",
"@babel/core": "7.0.0-beta.39",
"@babel/preset-env": "7.0.0-beta.39",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.1",
"rimraf": "^2.6.2"

View File

@@ -70,6 +70,29 @@ export class And extends Node {
}
}
// Matches numeric values against a comparison operator, e.g. the
// filter terms `>2`, `<=10` (see the `[<>]=?` rule in the parser).
export class Comparison extends Node {
constructor (operator, value) {
super()
// Predicate resolved once at construction from the operator token.
this._comparator = Comparison.comparators[operator]
this._operator = operator
this._value = value
}
// Only numbers can match; any other value type fails the comparison.
match (value) {
return typeof value === 'number' && this._comparator(value, this._value)
}
toString () {
return this._operator + String(this._value)
}
}
// Supported operator tokens mapped to their predicate.
Comparison.comparators = {
'>': (a, b) => a > b,
'>=': (a, b) => a >= b,
'<': (a, b) => a < b,
'<=': (a, b) => a <= b,
}
export class Or extends Node {
constructor (children) {
super()
@@ -325,7 +348,10 @@ class P {
value.push(result.value)
pos = result.pos
}
while (i < max && (result = this._parse(input, pos, end)) instanceof Success) {
while (
i < max &&
(result = this._parse(input, pos, end)) instanceof Success
) {
++i
value.push(result.value)
pos = result.pos
@@ -359,8 +385,9 @@ P.eof = new P(
const parser = P.grammar({
default: r =>
P.seq(r.ws, r.term.repeat(), P.eof)
.map(([, terms]) => (terms.length === 0 ? new Null() : new And(terms))),
P.seq(r.ws, r.term.repeat(), P.eof).map(
([, terms]) => (terms.length === 0 ? new Null() : new And(terms))
),
quotedString: new P((input, pos, end) => {
if (input[pos] !== '"') {
return new Failure(pos, '"')
@@ -404,6 +431,13 @@ const parser = P.grammar({
P.text(')')
).map(_ => new Or(_[4])),
P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
val = +val
if (Number.isNaN(val)) {
throw new TypeError('value must be a number')
}
return new Comparison(op, val)
}),
P.seq(r.string, r.ws, P.text(':'), r.ws, r.term).map(
_ => new Property(_[0], _[4])
),
@@ -416,7 +450,7 @@ const parser = P.grammar({
? new StringNode(str)
: new NumberNode(asNum)
})
),
)
).skip(r.ws),
ws: P.regex(/\s*/),
}).default
@@ -476,17 +510,19 @@ export const getPropertyClausesStrings = node => {
// -------------------------------------------------------------------
export const setPropertyClause = (node, name, child) => {
const property = child && new Property(
name,
typeof child === 'string' ? new StringNode(child) : child
)
const property =
child &&
new Property(
name,
typeof child === 'string' ? new StringNode(child) : child
)
if (node === undefined) {
return property
}
const children = (node instanceof And ? node.children : [node]).filter(child =>
!(child instanceof Property && child.name === name)
const children = (node instanceof And ? node.children : [node]).filter(
child => !(child instanceof Property && child.name === name)
)
if (property !== undefined) {
children.push(property)

View File

@@ -49,13 +49,15 @@ describe('Number', () => {
describe('setPropertyClause', () => {
it('creates a node if none passed', () => {
expect(setPropertyClause(undefined, 'foo', 'bar').toString()).toBe('foo:bar')
expect(setPropertyClause(undefined, 'foo', 'bar').toString()).toBe(
'foo:bar'
)
})
it('adds a property clause if there was none', () => {
expect(
setPropertyClause(parse('baz'), 'foo', 'bar').toString()
).toBe('baz foo:bar')
expect(setPropertyClause(parse('baz'), 'foo', 'bar').toString()).toBe(
'baz foo:bar'
)
})
it('replaces the property clause if there was one', () => {

View File

@@ -1,40 +1,3 @@
'use strict'
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const pkg = require('./package')
let nodeCompat = (pkg.engines || {}).node
if (nodeCompat === undefined) {
nodeCompat = '6'
} else {
const trimChars = '^=>~'
while (trimChars.includes(nodeCompat[0])) {
nodeCompat = nodeCompat.slice(1)
}
}
module.exports = {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
presets: [
[
'@babel/env',
{
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? {
browsers: '>2%',
node: nodeCompat,
}
: { node: 'current' },
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
},
],
'@babel/flow',
],
}
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,6 +1,6 @@
{
"name": "value-matcher",
"version": "0.1.0",
"version": "0.2.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -20,15 +20,18 @@
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=4"
},
"dependencies": {},
"devDependencies": {
"@babel/cli": "7.0.0-beta.39",
"@babel/core": "7.0.0-beta.39",
"@babel/preset-env": "7.0.0-beta.39",
"@babel/preset-flow": "7.0.0-beta.39",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

View File

@@ -26,13 +26,16 @@ type ObjectPattern = { [string]: Pattern }
type ArrayPattern = Array<Pattern>
// value equals the pattern
type ValuePattern = bool | number | string
type ValuePattern = boolean | number | string
const match = (pattern: Pattern, value: any) => {
if (Array.isArray(pattern)) {
return Array.isArray(value) && pattern.every((subpattern, i) =>
// FIXME: subpatterns should match different subvalues
value.some(subvalue => match(subpattern, subvalue))
return (
Array.isArray(value) &&
pattern.every((subpattern, i) =>
// FIXME: subpatterns should match different subvalues
value.some(subvalue => match(subpattern, subvalue))
)
)
}
@@ -41,7 +44,7 @@ const match = (pattern: Pattern, value: any) => {
const { length } = keys
if (length === 1) {
const [ key ] = keys
const [key] = keys
if (key === '__and') {
const andPattern: AndPattern = (pattern: any)
return andPattern.__and.every(subpattern => match(subpattern, value))
@@ -74,4 +77,5 @@ const match = (pattern: Pattern, value: any) => {
return pattern === value
}
export const createPredicate = (pattern: Pattern) => (value: any) => match(pattern, value)
export const createPredicate = (pattern: Pattern) => (value: any) =>
match(pattern, value)

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-cli",
"version": "0.0.0",
"version": "0.0.1",
"license": "ISC",
"description": "",
"keywords": [],
@@ -23,45 +23,32 @@
"dist/"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {
"@nraynaud/struct-fu": "^1.0.1",
"@nraynaud/xo-fs": "^0.0.5",
"babel-runtime": "^6.22.0",
"exec-promise": "^0.7.0"
"@xen-orchestra/fs": "^0.0.1",
"exec-promise": "^0.7.0",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.1.1"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
"execa": "^0.10.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.9.5",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"babel": {
"plugins": [
"lodash",
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
],
"stage-3"
]
"prepare": "yarn run build"
}
}

View File

@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
// `vhd-cli check`: verifies that every VHD file passed on the command
// line has a readable, consistent header and footer, printing one
// `ok:`/`nok:` line per file (errors do not abort the remaining files).
export default async filenames => {
  const handler = getHandler({ url: 'file:///' })
  for (let i = 0; i < filenames.length; ++i) {
    const filename = filenames[i]
    try {
      const vhd = new Vhd(handler, resolve(filename))
      await vhd.readHeaderAndFooter()
      console.log('ok:', filename)
    } catch (error) {
      console.error('nok:', filename, error)
    }
  }
}

View File

@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
// `vhd-cli info`: parses the VHD whose path is the first argument and
// dumps its header and footer structures to stdout.
export default async ([filename]) => {
  const handler = getHandler({ url: 'file:///' })
  const vhd = new Vhd(handler, resolve(filename))
  await vhd.readHeaderAndFooter()
  console.log(vhd.header)
  console.log(vhd.footer)
}

View File

@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
// `vhd-cli synthesize`: reads <input VHD> through a synthetic stream
// (flattening its parent chain) and writes the result to <output VHD>.
// Returns a usage string when arguments are missing or help is asked.
export default async function main (args) {
  if (args.length < 2 || args.some(arg => arg === '-h' || arg === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }

  const [input, output] = args
  const handler = getHandler({ url: 'file:///' })

  // Resolve when the output file is fully written, reject on any error
  // from either end of the pipe.
  return new Promise((resolve, reject) => {
    const sink = createWriteStream(output)
      .on('error', reject)
      .on('finish', resolve)
    createSyntheticStream(handler, path.resolve(input))
      .on('error', reject)
      .pipe(sink)
  })
}

View File

@@ -1,19 +1,44 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'
import Vhd from './vhd'
import commands from './commands'
execPromise(async args => {
const vhd = new Vhd(
new RemoteHandlerLocal({ url: 'file:///' }),
resolve(args[0])
// Dispatches `[command, ...args]` to the matching handler in `commands`.
//
// Must be called with a `this` context exposing `command` (the command
// path typed so far); each sub-command runs with a derived context whose
// `command` is extended with its own name, so nested usage strings stay
// accurate.
function runCommand (commands, [command, ...args]) {
// No command, `-h` or `--help` falls through to the `help` pseudo-command.
if (command === undefined || command === '-h' || command === '--help') {
command = 'help'
}
const fn = commands[command]
if (fn === undefined) {
// `help` has no registered handler: synthesize a usage message from
// the registered command names instead.
if (command === 'help') {
return `Usage:
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${this.command} ${command}`)
.join('\n\n')}`
}
// Deliberate string throw (note the eslint-disable); presumably
// surfaced as a plain error message by exec-promise — confirm.
throw `invalid command ${command}` // eslint-disable-line no-throw-literal
}
return fn.call(
{
// Inherit the caller's context but extend the displayed command path.
__proto__: this,
command: `${this.command} ${command}`,
},
args
)
}
await vhd.readHeaderAndFooter()
console.log(vhd._header)
console.log(vhd._footer)
})
execPromise(
runCommand.bind(
{
command: 'vhd-cli',
runCommand,
},
commands
)
)

View File

@@ -0,0 +1,28 @@
/* eslint-env jest */
// Integration smoke test for the `info` command: requires `qemu-img`
// to be installed on the machine running it.
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import command from './commands/info'
// Working directory to restore after each test ran in its own temp dir.
const initialDir = process.cwd()
// Allow up to 10s: creating the image and parsing it may be slow.
jest.setTimeout(10000)
// Each test runs inside a fresh temporary directory.
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
// Leave the temp dir before deleting it, then restore the initial cwd.
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
// Smoke test: `info` should accept a VHD freshly created by qemu-img.
test('can run the command', async () => {
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
await command(['empty.vhd'])
})

View File

@@ -1,441 +0,0 @@
import assert from 'assert'
import fu from '@nraynaud/struct-fu'
import { dirname } from 'path'
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
/* eslint-disable no-unused-vars */
// VHD `diskType` footer field values (only dynamic/differencing disks
// are handled by this module).
const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
// Parent-locator platform code meaning "entry unused".
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512
/* eslint-enable no-unused-vars */
// ===================================================================
// VHD footer layout; the trailing numbers are byte offsets within the
// structure (see the Microsoft VHD specification linked above).
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
fu.struct('dataOffset', [
fu.uint32('high'), // 16
fu.uint32('low'), // 20
]),
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
fu.struct('originalSize', [ // At the creation, current size of the hard disk.
fu.uint32('high'), // 40
fu.uint32('low'), // 44
]),
fu.struct('currentSize', [ // Current size of the virtual disk. At the creation: currentSize = originalSize.
fu.uint32('high'), // 48
fu.uint32('low'), // 52
]),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.byte('reserved', 426), // 86
])
// Total footer size in bytes as computed by struct-fu.
const FOOTER_SIZE = fuFooter.size
// VHD dynamic-disk header layout (follows the footer for dynamic and
// differencing disks).
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.struct('dataOffset', [
fu.uint32('high'),
fu.uint32('low'),
]),
fu.struct('tableOffset', [ // Absolute byte offset of the Block Allocation Table.
fu.uint32('high'),
fu.uint32('low'),
]),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.byte('reserved1', 4),
fu.char16be('parentUnicodeName', 512),
fu.struct('parentLocatorEntry', [
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
fu.struct('platformDataOffset', [ // Absolute byte offset of the locator data.
fu.uint32('high'),
fu.uint32('low'),
]),
], 8),
fu.byte('reserved2', 256),
])
// Total header size in bytes as computed by struct-fu.
const HEADER_SIZE = fuHeader.size
// ===================================================================
// Helpers
// ===================================================================
// 2^32, used to recombine 64-bit values stored as two 32-bit halves.
const SIZE_OF_32_BITS = 2 ** 32

// Combine the { high, low } halves of a struct-fu uint64 into a JS number.
const uint32ToUint64 = ({ high, low }) => high * SIZE_OF_32_BITS + low

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
// bytes[] bit manipulation — bit 0 is the LEAST significant bit of byte 0
// (note: the opposite convention of the VHD sector bitmap itself).
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))

const setBit = (map, bit) => {
  const mask = 1 << (bit & 7)
  map[bit >> 3] |= mask
}

const unsetBit = (map, bit) => {
  const mask = 1 << (bit & 7)
  map[bit >> 3] &= ~mask
}
// Sum a sequence of offsets, where each offset is either a plain byte count,
// a { bytes, bits } pair, or null/undefined (ignored).
// Always returns a { bytes, bits } pair.
const addOffsets = (...offsets) => {
  let bytes = 0
  let bits = 0
  for (const offset of offsets) {
    if (offset == null) {
      continue
    }
    if (typeof offset === 'object') {
      bytes += offset.bytes
      bits += offset.bits
    } else {
      bytes += offset
    }
  }
  return { bytes, bits }
}

// Write `value` into `buf` at the field's own offset shifted by `offset`.
const pack = (field, value, buf, offset) => {
  field.pack(value, buf, addOffsets(field.offset, offset))
}

// Read the field's value from `buf`, honoring an extra `offset`.
const unpack = (field, buf, offset) =>
  field.unpack(buf, addOffsets(field.offset, offset))
// ===================================================================
// Drain `stream` into a freshly allocated Buffer.
// Resolves with the concatenated data on 'end', rejects on 'error';
// all listeners are removed once the promise settles.
const streamToNewBuffer = stream =>
  new Promise((resolve, reject) => {
    const chunks = []
    let total = 0

    const onData = chunk => {
      chunks.push(chunk)
      total += chunk.length
    }

    const removeListeners = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }

    const onEnd = () => {
      removeListeners()
      resolve(Buffer.concat(chunks, total))
    }

    const onError = error => {
      removeListeners()
      reject(error)
    }

    stream.on('data', onData).on('end', onEnd).on('error', onError)
  })
// Drain `stream` into `buffer[offset:end]`.
// Resolves with the number of bytes written; rejects with 'too much data'
// if the stream would overflow the requested window (the overflowing chunk
// is not copied). Listeners are removed once the promise settles.
const streamToExistingBuffer = (
  stream,
  buffer,
  offset = 0,
  end = buffer.length
) =>
  new Promise((resolve, reject) => {
    assert(offset >= 0)
    assert(end > offset)
    assert(end <= buffer.length)

    let position = offset

    const removeListeners = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }

    const onError = error => {
      removeListeners()
      reject(error)
    }

    const onData = chunk => {
      const writeAt = position
      position += chunk.length
      if (position > end) {
        onError(new Error('too much data'))
        return
      }
      chunk.copy(buffer, writeAt)
    }

    const onEnd = () => {
      removeListeners()
      resolve(position - offset)
    }

    stream.on('data', onData).on('end', onEnd).on('error', onError)
  })
// ===================================================================
// Returns the checksum of a raw struct (footer or header) stored in `buf`
// starting at `offset`: the ones' complement of the byte sum, excluding the
// bytes of the stored checksum field itself.
const computeChecksum = (struct, buf, offset = 0) => {
  const { checksum } = struct.fields
  const skipFrom = offset + checksum.offset
  const skipTo = skipFrom + checksum.size
  const end = offset + struct.size

  let sum = 0
  for (let i = offset; i < end; ++i) {
    // Do not use the stored checksum to compute the new checksum.
    if (i < skipFrom || i >= skipTo) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0
}
// True when the checksum stored in the struct matches the recomputed one.
const verifyChecksum = (struct, buf, offset) => {
  const stored = unpack(struct.fields.checksum, buf, offset)
  return stored === computeChecksum(struct, buf, offset)
}
// Normalize the `platformDataSpace` field of a parent locator entry to bytes.
// Per the VHD spec this field is a byte count (a multiple of the sector size),
// but some implementations appear to store a sector count instead: a value
// smaller than one sector is treated as sectors. Any other non-multiple of
// the sector size is considered invalid and yields 0.
const getParentLocatorSize = ({ platformDataSpace }) => {
  if (platformDataSpace < SECTOR_SIZE) {
    return platformDataSpace * SECTOR_SIZE
  }
  return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}
// ===================================================================
// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => {
  const quotient = Math.floor(a / b)
  return [quotient, a % b]
}
// Read-only accessor for a (possibly differencing) VHD file reached through a
// remote `handler` (project type providing `createReadStream(path, opts)`).
// Callers must invoke `readHeaderAndFooter()` then `readBlockAllocationTable()`
// before using `read()`.
export default class Vhd {
  constructor (handler, path) {
    this._handler = handler
    this._path = path
    this._blockAllocationTable = null // uint32[] of sector addresses, one per block
    this._blockBitmapSize = null // size in bytes of the per-block sector bitmap
    this._footer = null
    this._header = null
    this._parent = null // parent Vhd instance when this disk is differencing
    this._sectorsPerBlock = null // NOTE(review): never assigned in this class — appears unused
  }

  // Read `length` bytes starting from `begin`.
  //
  // - if `buffer`: it is filled starting from `offset`, and the
  //   number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _read (begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    return this._handler.createReadStream(this._path, {
      end: begin + length - 1, // `end` is inclusive
      start: begin,
    }).then(buf
      ? stream => streamToExistingBuffer(stream, buf, offset, (offset || 0) + length)
      : streamToNewBuffer
    )
  }

  // - if `buffer`: it is filled with 0 starting from `offset`, and
  //   the number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _zeroes (length, buf, offset = 0) {
    if (buf) {
      assert(offset >= 0)
      assert(length > 0)

      const end = offset + length
      assert(end <= buf.length)

      buf.fill(0, offset, end)
      return Promise.resolve(length)
    }
    return Promise.resolve(Buffer.alloc(length))
  }

  // Return the position of a block in the VHD or undefined if not found.
  _getBlockAddress (block) {
    assert(block >= 0)
    assert(block < this._header.maxTableEntries)

    const blockAddr = this._blockAllocationTable[block]
    if (blockAddr !== 0xFFFFFFFF) { // 0xFFFFFFFF marks an unallocated block
      return blockAddr * SECTOR_SIZE // BAT entries are expressed in sectors
    }
  }

  // -----------------------------------------------------------------

  // Load and verify the footer (first 512 bytes) and header (next 1024 bytes),
  // then initialize the derived metadata (and the parent chain if any).
  async readHeaderAndFooter () {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)

    if (!verifyChecksum(fuFooter, buf)) {
      throw new Error('footer checksum does not match')
    }

    if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
      throw new Error('header checksum does not match')
    }

    return this._initMetadata(
      unpack(fuHeader, buf, FOOTER_SIZE),
      unpack(fuFooter, buf)
    )
  }

  // Store the parsed header/footer and, for differencing disks, recursively
  // open the parent VHD (resolved relative to this file's directory).
  async _initMetadata (header, footer) {
    const sectorsPerBlock = header.blockSize / SECTOR_SIZE
    assert(sectorsPerBlock % 1 === 0) // block size must be a whole number of sectors

    // 1 bit per sector, rounded up to full sectors
    this._blockBitmapSize = Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
    // NOTE(review): only the default 2 MiB block size yields a 512-byte bitmap;
    // other block sizes would trip this assertion.
    assert(this._blockBitmapSize === SECTOR_SIZE)

    this._footer = footer
    this._header = header
    this.size = uint32ToUint64(this._footer.currentSize)

    if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
      const parent = new Vhd(
        this._handler,
        `${dirname(this._path)}/${header.parentUnicodeName}`
      )
      await parent.readHeaderAndFooter()
      await parent.readBlockAllocationTable()

      this._parent = parent
    }
  }

  // -----------------------------------------------------------------

  // Load the Block Allocation Table (one uint32 sector address per block).
  async readBlockAllocationTable () {
    const { maxTableEntries, tableOffset } = this._header
    const fuTable = fu.uint32(maxTableEntries)

    this._blockAllocationTable = unpack(
      fuTable,
      await this._read(uint32ToUint64(tableOffset), fuTable.size)
    )
  }

  // -----------------------------------------------------------------

  // read a single sector in a block
  //
  // Falls back to the parent disk (or zeroes) when the sector is not marked
  // present in this disk's block bitmap.
  async _readBlockSector (block, sector, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)
    assert(begin + length <= SECTOR_SIZE)

    const blockAddr = this._getBlockAddress(block)
    const blockBitmapSize = this._blockBitmapSize
    const parent = this._parent

    // Without a parent the bitmap is irrelevant: the block's data is
    // authoritative. With a parent, only sectors whose bitmap bit is set
    // belong to this disk.
    if (blockAddr && (
      !parent ||
      testBit(await this._read(blockAddr, blockBitmapSize), sector)
    )) {
      return this._read(
        blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
        length,
        buf,
        offset
      )
    }

    return parent
      ? parent._readBlockSector(block, sector, begin, length, buf, offset)
      : this._zeroes(length, buf, offset)
  }

  // Read up to `length` bytes inside a single block, starting at `begin`
  // (relative to the block). May return fewer bytes than requested: with a
  // parent it only reads up to the end of the current sector.
  _readBlock (block, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    const { blockSize } = this._header
    assert(begin + length <= blockSize)

    const blockAddr = this._getBlockAddress(block)
    const parent = this._parent

    if (!blockAddr) {
      return parent
        ? parent._readBlock(block, begin, length, buf, offset)
        : this._zeroes(length, buf, offset)
    }

    if (!parent) {
      // No parent: the whole block belongs to this disk, skip the bitmap.
      return this._read(blockAddr + this._blockBitmapSize + begin, length, buf, offset)
    }

    // FIXME: we should read as many sectors in a single pass as
    // possible for maximum perf.
    const [ sector, beginInSector ] = div(begin, SECTOR_SIZE)
    return this._readBlockSector(
      block,
      sector,
      beginInSector,
      Math.min(length, SECTOR_SIZE - beginInSector),
      buf,
      offset
    )
  }

  // Read up to `length` bytes of the virtual disk starting at `begin` into
  // `buf` (at `offset`). Resolves with the number of bytes actually read,
  // which may be less than `length` (reads never cross a block boundary);
  // resolves with 0 when `begin` is past the end of the disk.
  read (buf, begin, length = buf.length, offset) {
    assert(Buffer.isBuffer(buf))
    assert(begin >= 0)

    const { size } = this
    if (begin >= size) {
      return Promise.resolve(0)
    }

    const { blockSize } = this._header
    const [ block, beginInBlock ] = div(begin, blockSize)

    return this._readBlock(
      block,
      beginInBlock,
      Math.min(length, blockSize - beginInBlock, size - begin),
      buf,
      offset
    )
  }
}

View File

@@ -0,0 +1,3 @@
// Delegate the Babel configuration to the monorepo-wide factory, passing this
// package's manifest so the shared config can adapt to it.
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

View File

@@ -0,0 +1,56 @@
{
"name": "vhd-lib",
"version": "0.1.1",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.49",
"async-iterator-to-stream": "^1.0.2",
"from2": "^2.3.0",
"fs-extra": "^6.0.1",
"get-stream": "^3.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.9.5",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"@xen-orchestra/fs": "^0.0.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^0.10.0",
"fs-promise": "^2.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}

View File

@@ -0,0 +1,7 @@
// VHD sector bitmaps are bit-big-endian: bit 0 of the bitmap is the MOST
// significant bit of its byte.
const MSB = 0x80

export const set = (map, bit) => {
  const byteIndex = bit >> 3
  const bitInByte = bit & 7
  map[byteIndex] |= MSB >> bitInByte
}

export const test = (map, bit) => {
  const byteIndex = bit >> 3
  const bitInByte = bit & 7
  return ((map[byteIndex] << bitInByte) & MSB) !== 0
}

View File

@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'
// Compute a CHS (cylinders / heads / sectors-per-track) geometry for a
// virtual disk of `size` bytes, following the VHD specification's appendix
// on CHS calculation.
//
// Returns { cylinders, heads, sectorsPerTrackCylinder, actualSize } where
// `actualSize` is the size in bytes actually covered by the geometry.
// Throws when the disk exceeds the maximum addressable geometry
// (65535 cylinders × 16 heads × 255 sectors).
export default function computeGeometryForSize (size) {
  // Use SECTOR_SIZE instead of a hard-coded 512 for consistency with the
  // `actualSize` computation below (same value, clearer intent).
  const totalSectors = Math.ceil(size / SECTOR_SIZE)
  let sectorsPerTrackCylinder
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw new Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrackCylinder = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
  } else {
    sectorsPerTrackCylinder = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrackCylinder = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrackCylinder = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
  }
  const cylinders = Math.ceil(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
  return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}

View File

@@ -0,0 +1,30 @@
// BAT entry value marking a block that has not been allocated yet.
export const BLOCK_UNUSED = 0xffffffff

// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '

// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512

export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec

// Magic strings identifying the footer and the header.
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'

// `diskType` values of the footer.
export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4

// Number of parent locator entries in the header, per the spec.
export const PARENT_LOCATOR_ENTRIES = 8

// `platformCode` values of a parent locator entry (ASCII tags as uint32).
export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

// Versions encoded as major << 16 | minor (1.0 for both).
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16

View File

@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'
import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_FIXED,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PLATFORM_WI2K,
} from './_constants'
// Build a packed, checksummed VHD footer buffer.
//
// `size`: virtual disk size in bytes (used for both original and current
// size); `timestamp`: creation time in seconds; `geometry`:
// { cylinders, heads, sectorsPerTrackCylinder }; `dataOffset`: absolute
// offset of the header (may be undefined for fixed disks); `diskType`:
// defaults to a fixed disk.
export function createFooter (
  size,
  timestamp,
  geometry,
  dataOffset,
  diskType = DISK_TYPE_FIXED
) {
  const fields = {
    cookie: FOOTER_COOKIE,
    features: 2,
    fileFormatVersion: FILE_FORMAT_VERSION,
    dataOffset,
    timestamp,
    creatorApplication: CREATOR_APPLICATION,
    creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
    originalSize: size,
    currentSize: size,
    diskGeometry: geometry,
    diskType,
    uuid: generateUuid(null, []), // raw 16-byte array form
  }
  const footer = fuFooter.pack(fields)
  checksumStruct(footer, fuFooter)
  return footer
}
// Build a packed, checksummed VHD header buffer for a dynamic/differencing
// disk. `tableOffset` is the absolute position of the BAT; unspecified
// fields are left zeroed by the struct packing.
export function createHeader (
  maxTableEntries,
  tableOffset = HEADER_SIZE + FOOTER_SIZE,
  blockSize = VHD_BLOCK_SIZE_BYTES
) {
  const fields = {
    cookie: HEADER_COOKIE,
    tableOffset,
    headerVersion: HEADER_VERSION,
    maxTableEntries,
    blockSize,
  }
  const header = fuHeader.pack(fields)
  checksumStruct(header, fuHeader)
  return header
}

View File

@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
const SIZE_OF_32_BITS = Math.pow(2, 32)

// Split a JS number into [high, low] 32-bit halves.
const toHighLow = number => [
  Math.floor(number / SIZE_OF_32_BITS),
  number % SIZE_OF_32_BITS,
]

// Recombine [high, low] halves into a JS number.
const fromHighLow = pair => pair[0] * SIZE_OF_32_BITS + pair[1]

// 64-bit unsigned integer stored as two 32-bit words.
const uint64 = fu.derive(fu.uint32(2), toHighLow, fromHighLow)

// Same as uint64, but the all-ones bit pattern encodes `undefined`.
const uint64Undefinable = fu.derive(
  fu.uint32(2),
  number =>
    number === undefined ? [0xffffffff, 0xffffffff] : toHighLow(number),
  pair =>
    pair[0] === 0xffffffff && pair[1] === 0xffffffff
      ? undefined
      : fromHighLow(pair)
)
// struct-fu layout of the 512-byte VHD footer (byte offsets noted inline).
export const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  uint64Undefinable('dataOffset'), // offset of the header
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  uint64('originalSize'), // 40 — size at creation time
  uint64('currentSize'), // 48 — current virtual size
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85 TODO: should probably be merged in reserved
  fu.char('reserved', 426), // 86 — pads the footer to 512 bytes
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
// struct-fu layout of the 1024-byte VHD header (dynamic/differencing disks).
export const fuHeader = fu.struct([
  fu.char('cookie', 8),
  uint64Undefinable('dataOffset'), // unused, kept for compatibility
  uint64('tableOffset'), // absolute byte offset of the BAT
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.uint32('reserved1'),
  fu.char16be('parentUnicodeName', 512), // UTF-16BE file name of the parent
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
    ],
    PARENT_LOCATOR_ENTRIES
  ),
  fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)
// Write `value` at a struct-fu field's own offset inside `buf`.
// struct-fu expects a { bytes, bits } offset; a plain numeric offset is
// normalized on the fly.
export const packField = (field, value, buf) => {
  const { offset } = field
  const normalized =
    typeof offset === 'object' ? offset : { bytes: offset, bits: 0 }
  field.pack(value, buf, normalized)
}

// Read a struct-fu field's value from `buf` at the field's own offset.
export const unpackField = (field, buf) => {
  const { offset } = field
  const normalized =
    typeof offset === 'object' ? offset : { bytes: offset, bits: 0 }
  return field.unpack(buf, normalized)
}

// Returns the checksum of a raw struct (ones' complement of the byte sum,
// excluding the stored checksum field itself).
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
  const { checksum } = struct.fields
  const skipFrom = checksum.offset
  const skipTo = skipFrom + checksum.size

  let sum = 0
  for (let i = 0, n = struct.size; i < n; ++i) {
    // Do not use the stored checksum to compute the new checksum.
    if (i < skipFrom || i >= skipTo) {
      sum += buf[i]
    }
  }
  sum = ~sum >>> 0

  // Write new sum.
  packField(checksum, sum, buf)
  return sum
}

View File

@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'
// Chain `childPath` on top of `parentPath`: make the child a differencing
// disk whose parent is `parentPath`.
//
// Mutates the child's header/footer in memory (the destructured `header` and
// `footer` are references into `childVhd`) and then persists them with
// writeHeader()/writeFooter(). With `force`, a non-differencing child is
// converted; otherwise it is an error.
export default async function chain (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = DISK_TYPE_DIFFERENCING
  }

  // Load the child's BAT while reading the parent's metadata.
  await Promise.all([
    childVhd.readBlockAllocationTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  // Parent name stored relative to the child's directory.
  const parentName = relative(dirname(childPath), parentPath)
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName

  // NOTE(review): setUniqueParentLocator/writeHeader/writeFooter are Vhd
  // methods not visible in this file view.
  await childVhd.setUniqueParentLocator(parentName)
  await childVhd.writeHeader()
  await childVhd.writeFooter()
}

View File

@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
// Build a readable stream containing a fixed (raw + footer) VHD image of
// `size` bytes, fed by `blockParser.next()` which must return blocks
// ({ offsetBytes, data }) in ascending offset order, then null.
// Gaps between blocks are zero-filled.
export default asyncIteratorToStream(async function * (size, blockParser) {
  const geometry = computeGeometryForSize(size)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry
  )
  let position = 0

  // Yield `paddingLength` bytes of zeroes in 1 MiB chunks.
  function * filePadding (paddingLength) {
    if (paddingLength > 0) {
      const chunkSize = 1024 * 1024 // 1 MiB
      for (
        let paddingPosition = 0;
        paddingPosition + chunkSize < paddingLength;
        paddingPosition += chunkSize
      ) {
        yield Buffer.alloc(chunkSize)
      }
      // Remainder (may be an empty buffer when paddingLength is a multiple
      // of chunkSize).
      yield Buffer.alloc(paddingLength % chunkSize)
    }
  }

  let next
  while ((next = await blockParser.next()) !== null) {
    const paddingLength = next.offsetBytes - position
    if (paddingLength < 0) {
      throw new Error('Received out of order blocks')
    }
    yield * filePadding(paddingLength)
    yield next.data
    position = next.offsetBytes + next.data.length
  }

  // Zero-fill up to the geometry size, then append the footer.
  yield * filePadding(actualSize - position)
  yield footer
})

View File

@@ -0,0 +1,126 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
 * Fills `bat` in place: allocates a starting sector address for every VHD
 * block referenced by `blockAddressList`, packing blocks (bitmap + data)
 * contiguously from `firstBlockPosition`. Returns nothing.
 */
// Fill `bat` (big-endian uint32 sector addresses, pre-filled with
// BLOCK_UNUSED) in place. Each distinct VHD block referenced by
// `blockAddressList` gets the next free position, advancing by
// bitmap + block data. NOTE: `ratio` is currently unused but kept for
// signature compatibility with callers.
function createBAT (
  firstBlockPosition,
  blockAddressList,
  ratio,
  bat,
  bitmapSize
) {
  let nextBlockSector = firstBlockPosition / SECTOR_SIZE
  for (const blockPosition of blockAddressList) {
    assert.strictEqual(blockPosition % 512, 0)
    const batByteIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES) * 4
    // Allocate each VHD block only once, even if several incoming blocks
    // land inside it.
    if (bat.readUInt32BE(batByteIndex) === BLOCK_UNUSED) {
      bat.writeUInt32BE(nextBlockSector, batByteIndex)
      nextBlockSector += (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
    }
  }
}
// Build a readable stream containing a dynamic VHD image of `diskSize` bytes
// from incoming blocks of `incomingBlockSize` bytes. `blockAddressList` must
// list the byte offsets of the blocks that `blockIterator` will produce, in
// the same order.
export default asyncIteratorToStream(async function * (
  diskSize,
  incomingBlockSize,
  blockAddressList,
  blockIterator
) {
  // How many incoming blocks fit in one VHD block; must be a whole number.
  const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
  if (ratio % 1 !== 0) {
    throw new Error(
      `Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
    )
  }
  // NOTE(review): the rationale for the 53 upper bound is not documented here.
  if (ratio > 53) {
    throw new Error(
      `Can't import file, grain size / block size ratio is > 53 (${ratio})`
    )
  }
  const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
  // BAT is stored in whole 512-byte sectors.
  const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512

  const batPosition = FOOTER_SIZE + HEADER_SIZE
  const firstBlockPosition = batPosition + tablePhysicalSizeBytes
  const geometry = computeGeometryForSize(diskSize)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry,
    FOOTER_SIZE,
    DISK_TYPE_DYNAMIC
  )
  const header = createHeader(
    maxTableEntries,
    batPosition,
    VHD_BLOCK_SIZE_BYTES
  )
  // One bit per sector, rounded up to whole sectors.
  const bitmapSize =
    Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
  const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff) // 0xff… = BLOCK_UNUSED
  createBAT(firstBlockPosition, blockAddressList, ratio, bat, bitmapSize)
  let position = 0

  // Yield `buffer` while tracking the absolute output position; when
  // `expectedPosition` is given, assert the layout is still in sync.
  function * yieldAndTrack (buffer, expectedPosition) {
    if (expectedPosition !== undefined) {
      assert.strictEqual(position, expectedPosition)
    }
    if (buffer.length > 0) {
      yield buffer
      position += buffer.length
    }
  }

  // Assemble incoming blocks into full VHD blocks (bitmap + data) and yield
  // them at the positions pre-assigned in the BAT. Incoming blocks must
  // arrive grouped by VHD block, in BAT order.
  async function * generateFileContent (blockIterator, bitmapSize, ratio) {
    let currentBlock = -1
    let currentVhdBlockIndex = -1
    let currentBlockWithBitmap = Buffer.alloc(0)
    for await (const next of blockIterator) {
      currentBlock++
      assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
      const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
      if (batIndex !== currentVhdBlockIndex) {
        // Flush the previous VHD block at the position the BAT promised.
        if (currentVhdBlockIndex >= 0) {
          yield * yieldAndTrack(
            currentBlockWithBitmap,
            bat.readUInt32BE(currentVhdBlockIndex * 4) * 512
          )
        }
        currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
        currentVhdBlockIndex = batIndex
      }
      // Mark the sectors covered by this incoming block as present.
      const blockOffset = (next.offsetBytes / 512) % VHD_BLOCK_SIZE_SECTORS
      for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
        setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
      }
      next.data.copy(
        currentBlockWithBitmap,
        bitmapSize + next.offsetBytes % VHD_BLOCK_SIZE_BYTES
      )
    }
    // Flush the last pending VHD block.
    yield * yieldAndTrack(currentBlockWithBitmap)
  }
  // Fixed layout: footer, header, BAT, blocks, trailing footer copy.
  yield * yieldAndTrack(footer, 0)
  yield * yieldAndTrack(header, FOOTER_SIZE)
  yield * yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
  yield * generateFileContent(blockIterator, bitmapSize, ratio)
  yield * yieldAndTrack(footer)
})

View File

@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'
import Vhd from './vhd'
import {
BLOCK_UNUSED,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
// Resolve `path` against the directory of `file`, returning a path that is
// still relative (the leading separator is stripped).
const resolveRelativeFromFile = (file, path) => {
  const absolute = resolve('/', dirname(file), path)
  return absolute.slice(1)
}
// Build a readable stream containing a single dynamic VHD synthesized from a
// chain of differencing VHDs starting at `path` and following parent links up
// to the root dynamic disk.
//
// NOTE(review): this is a *sync* generator yielding promises; it relies on
// asyncIteratorToStream awaiting each yielded promise and feeding the result
// back into the generator (e.g. `const fd = yield handler.openFile(...)`).
export default asyncIteratorToStream(function * (handler, path) {
  const fds = []
  try {
    // Open the whole chain, child first, root (dynamic) disk last.
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockAllocationTable()
      if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
        break
      }
      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length
    // this the VHD we want to synthetize
    const vhd = vhds[0]
    // this is the root VHD
    const rootVhd = vhds[nVhds - 1]
    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: FOOTER_SIZE + HEADER_SIZE,
      parentTimestamp: rootVhd.header.parentTimestamp,
      parentUnicodeName: rootVhd.header.parentUnicodeName,
      parentUuid: rootVhd.header.parentUuid,
    }
    const bat = Buffer.allocUnsafe(vhd.batSize)
    let footer = {
      ...vhd.footer,
      dataOffset: FOOTER_SIZE,
      diskType: rootVhd.footer.diskType,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    // Build the synthetic BAT: each block present anywhere in the chain gets
    // the next free position; record which VHD of the chain owns it.
    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil(
          (header.tableOffset + bat.length) / SECTOR_SIZE
        );
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    // Emit footer, header and BAT (checksums recomputed after packing).
    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer
    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header
    yield bat

    // TODO: for generic usage the bitmap needs to be properly computed for each block
    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }
      yield bitmap

      // Per-block cache so each VHD of the chain is read at most once.
      const blocksByVhd = new Map()
      // Emit sectors [i, n) of block `iBlock`, taking each sector from the
      // topmost VHD (starting at index `iVhd`) whose bitmap marks it present;
      // missing sectors fall through to deeper parents, and to zeroes (the
      // root's data) at the bottom.
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd === rootVhd
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
          return
        }
        while (i < n) {
          // Group consecutive sectors with the same presence state so data
          // is yielded in runs rather than sector by sector.
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
    }
    // Trailing footer copy, as required by the VHD layout.
    yield footer
  } finally {
    // Best effort close of every opened fd; failures are only logged.
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})

View File

@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'

View File

@@ -0,0 +1,283 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'
import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'
// Each test runs inside its own temporary working directory, removed
// afterwards; the original cwd is restored between tests.
const initialDir = process.cwd()

jest.setTimeout(60000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})
// Create `name` filled with `sizeMb` MiB of random printable characters.
async function createRandomFile (name, sizeMb) {
  const script = `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`
  await execa('bash', ['-c', script])
}
// Validate a VHD with the external `vhd-util check` tool; rejects when the
// file is invalid.
async function checkFile (vhdName) {
  const args = ['check', '-p', '-b', '-t', '-n', vhdName]
  await execa('vhd-util', args)
}
// Check `vhdPath` then convert it back to a raw image at `rawPath`,
// optionally truncating to `originalSize` (qemu-img rounds sizes up).
async function recoverRawContent (vhdPath, rawPath, originalSize) {
  await checkFile(vhdPath)
  await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdPath, rawPath])
  if (originalSize !== undefined) {
    await execa('truncate', ['-s', originalSize, rawPath])
  }
}
// Convert a raw image to a VHD with qemu-img.
async function convertFromRawToVhd (rawPath, vhdPath) {
  const args = ['convert', '-f', 'raw', '-Ovpc', rawPath, vhdPath]
  await execa('qemu-img', args)
}
// Moving blocks with _freeFirstBlockSpace must not alter the disk content.
test('blocks can be moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd._freeFirstBlockSpace(8000000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})
// Writing beyond 2^31 sectors proves BAT entries are treated as unsigned
// 32-bit values (bit 31 must not be interpreted as a sign bit).
test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const vhd = new Vhd(handler, 'empty.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  // we want the bit 31 to be on, to prove it's not been used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile('empty.vhd')
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)

  // we recover the data manually for speed reasons.
  // fs.write() with offset is way faster than qemu-img when there is a 1.5To
  // hole before the block of data
  const recoveredFile = await fs.open('recovered', 'w')
  try {
    const vhd2 = new Vhd(handler, 'empty.vhd')
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
      const entry = vhd._getBatEntry(i)
      if (entry !== 0xffffffff) {
        const block = (await vhd2._readBlock(i)).data
        await fs.write(
          recoveredFile,
          block,
          0,
          block.length,
          vhd2.header.blockSize * i
        )
      }
    }
  } finally {
    // was a floating promise: await so the close (and any error) happens
    // before the file is read back
    await fs.close(recoveredFile)
  }
  const recovered = await getStream.buffer(
    await fs.createReadStream('recovered', {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
    })
  )
  expect(recovered).toEqual(randomBuffer)
})
// writeData on a blank dynamic VHD must round-trip the raw content.
test('writeData on empty file', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(0, randomData)
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

// Two disjoint writes (split at a sector boundary) must reassemble the data.
test('writeData in 2 non-overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const splitPointSectors = 2
  await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
  await newVhd.writeData(
    splitPointSectors,
    randomData.slice(splitPointSectors * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

// The second write overlaps one sector of the first; later data must win.
test('writeData in 2 overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const endFirstWrite = 3
  const startSecondWrite = 2
  await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
  await newVhd.writeData(
    startSecondWrite,
    randomData.slice(startSecondWrite * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
  // Growing the Block Allocation Table past its current capacity forces
  // existing data blocks to be relocated (see Vhd#ensureBatSize); the
  // data must survive the move byte for byte.
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  // 2000 entries is far more than a 4 MiB disk needs, guaranteeing the
  // BAT actually grows and the first block has to be moved.
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})
test('coalesce works with empty parent files', async () => {
  // Merge a VHD with content into an empty parent: after vhdMerge the
  // parent must expose exactly the child's raw data.
  const mbOfRandom = 2
  await createRandomFile('randomfile', mbOfRandom)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  // The parent is created 1 MiB larger than the child's data.
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'empty.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  // Use the public getSize() like the other tests in this file instead of
  // reaching into the private _getSize() implementation.
  const originalSize = await handler.getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})
test('coalesce works in normal cases', async () => {
  // Full merge scenario: parent <- child1 <- child2, with an extra write
  // into child2. After merging both children into the parent, the parent
  // must equal the original file patched with the small random data.
  const mbOfRandom = 5
  await createRandomFile('randomfile', mbOfRandom)
  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'parent.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()
  // Use the public getSize() like the other tests in this file instead of
  // reaching into the private _getSize() implementation.
  const originalSize = await handler.getSize('randomfile')
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  // Overwrite child2 starting at sector 5 with the small random data.
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
  await checkFile('parent.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
  await checkFile('parent.vhd')
  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
  await checkFile('child2.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
  await checkFile('parent.vhd')
  await recoverRawContent(
    'parent.vhd',
    'recovered_from_coalescing',
    originalSize
  )
  // Build the expected result: the original file with smallRandom written
  // at the same 5-sector offset.
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
    await fs.readFile('randomfile2')
  )
})
test('createSyntheticStream passes vhd-util check', async () => {
  // Export a VHD as a stream, write it back to disk and verify the copy
  // both with vhd-util and by comparing contents with qemu-img.
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createReadStream(handler, 'randomfile.vhd')
  // fs.createWriteStream() returns a stream synchronously: awaiting it was
  // a no-op, so the spurious `await` has been removed.
  await fromEvent(stream.pipe(fs.createWriteStream('recovered.vhd')), 'finish')
  await checkFile('recovered.vhd')
  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})

View File

@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
// Merge vhd child into vhd parent.
// Coalesces the blocks of `childPath` into `parentPath` and rewrites the
// parent footer from the child's, so the parent ends up representing the
// same virtual disk as the child. At most 2 merges run concurrently
// (limit-concurrency-decorator). Returns the number of merged data bytes.
export default concurrency(2)(async function merge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  // Parent is opened read-write (blocks are coalesced into it), child
  // read-only; both descriptors are released in `finally` even on failure.
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      // Blocks can only be copied 1:1 if both files use the same block size.
      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      // The parent may be dynamic or differencing, but the child must be a
      // differencing disk (only its allocated blocks are merged).
      const parentDiskType = parentVhd.footer.diskType
      assert(
        parentDiskType === DISK_TYPE_DIFFERENCING ||
          parentDiskType === DISK_TYPE_DYNAMIC
      )
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)

      // Read allocation table of child/parent.
      await Promise.all([
        parentVhd.readBlockAllocationTable(),
        childVhd.readBlockAllocationTable(),
      ])

      // Grow the parent's BAT so every child block id is addressable.
      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        // Only blocks actually allocated in the child need to be merged.
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      // The merged parent now represents the child's disk: copy the
      // size/geometry/identity fields from the child footer.
      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }
})

View File

@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'
import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'
// Remember where the test runner started so afterEach can restore it.
const initialDir = process.cwd()

// Each test runs inside its own throw-away temporary directory so the
// files it creates never collide with another test's.
beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  // Leave the temporary directory before deleting it.
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('createFooter() does not crash', () => {
  // Smoke test only: build a footer for a 104448-byte disk with a 3/4/17
  // CHS geometry and the current timestamp; the result is not inspected.
  createFooter(104448, Math.floor(Date.now() / 1000), {
    cylinders: 3,
    heads: 4,
    sectorsPerTrack: 17,
  })
})
test('ReadableRawVHDStream does not crash', async () => {
  // Two small fragments at increasing offsets inside a 1000-byte file.
  const data = [
    {
      offsetBytes: 100,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 700,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  // Minimal parser stub: next() yields each fragment once, then null to
  // signal the end of input.
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  const fileSize = 1000
  const stream = createReadableRawVHDStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  // The produced file must be a valid VHD according to vhd-util.
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
  // Fragments are deliberately emitted with decreasing offsets (700 then
  // 100): the stream must fail instead of producing a corrupt image.
  const data = [
    {
      offsetBytes: 700,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 100,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  // Same minimal parser stub as the previous test.
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  // The error may surface on the readable side or on the piped writable
  // side, so both are wired to reject the promise.
  return expect(
    new Promise((resolve, reject) => {
      const stream = createReadableRawVHDStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
  ).rejects.toThrow('Received out of order blocks')
})
test('ReadableSparseVHDStream can handle a sparse file', async () => {
  // Two 64 KiB blocks far apart in a 110-block file: the stream must emit
  // a valid sparse VHD whose raw conversion matches the expected layout.
  const blockSize = Math.pow(2, 16)
  const blocks = [
    {
      offsetBytes: blockSize * 3,
      data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: blockSize * 100,
      data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
    },
  ]
  const fileSize = blockSize * 110
  const stream = createReadableSparseVHDStream(
    fileSize,
    blockSize,
    blocks.map(b => b.offsetBytes),
    blocks
  )
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  // Convert back to raw to compare actual content, not just structure.
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
  ])
  const out1 = await readFile('out1.raw')
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)
  })
  // expect().toEqual() is synchronous: the spurious `await` was removed.
  expect(out1.slice(0, expected.length)).toEqual(expected)
})

631
packages/vhd-lib/src/vhd.js Normal file
View File

@@ -0,0 +1,631 @@
import assert from 'assert'
import getStream from 'get-stream'
import { fromEvent } from 'promise-toolbox'
import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
SECTOR_SIZE,
} from './_constants'
// Debug logging is compiled out unless VHD_UTIL_DEBUG is truthy.
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
  ? str => console.log(`[vhd-merge]${str}`)
  : () => null

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

// Size in bytes of a BAT holding `entries` 4-byte entries, rounded up to
// a whole number of sectors.
const computeBatSize = entries =>
  sectorsToBytes(sectorsRoundUpNoZero(entries * 4))

// Sectors conversions.
// Round a byte count up to sectors; never returns 0 (a zero-length
// structure still occupies one sector on disk).
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE

// Compare the checksum stored in `buf` against the one recomputed from
// its content by checksumStruct(); throws when they differ.
const assertChecksum = (name, buf, struct) => {
  const actual = unpackField(struct.fields.checksum, buf)
  const expected = checksumStruct(buf, struct)
  if (actual !== expected) {
    throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
  }
}

// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
// Format:
//
// 1. Footer (512)
// 2. Header (1024)
// 3. Unordered entries
// - BAT (batSize @ header.tableOffset)
// - Blocks (@ blockOffset(i))
// - bitmap (blockBitmapSize)
// - data (header.blockSize)
// - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
// 4. Footer (512 @ vhdSize - 512)
//
// Variables:
//
// - batSize = min(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
// - blockOffset(i) = bat[i] * sectorSize
// - nBlocks = ceil(footer.currentSize / header.blockSize)
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export default class Vhd {
  // Current BAT size in bytes, derived from the header.
  get batSize () {
    return computeBatSize(this.header.maxTableEntries)
  }

  constructor (handler, path) {
    this._handler = handler
    this._path = path
  }

  // =================================================================
  // Read functions.
  // =================================================================

  // Stream over bytes [start, start + n) of the underlying file.
  _readStream (start, n) {
    return this._handler.createReadStream(this._path, {
      start,
      end: start + n - 1, // end is inclusive
    })
  }

  // Read exactly `n` bytes at `start` into a buffer.
  _read (start, n) {
    return this._readStream(start, n)
      .then(getStream.buffer)
      .then(buf => {
        assert.equal(buf.length, n)
        return buf
      })
  }

  // Whether block `id` is allocated in the BAT.
  containsBlock (id) {
    return this._getBatEntry(id) !== BLOCK_UNUSED
  }

  // Returns the first address after metadata. (In bytes)
  getEndOfHeaders () {
    const { header } = this

    let end = FOOTER_SIZE + HEADER_SIZE

    // Max(end, block allocation table end)
    end = Math.max(end, header.tableOffset + this.batSize)

    for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
      const entry = header.parentLocatorEntry[i]

      if (entry.platformCode !== PLATFORM_NONE) {
        end = Math.max(
          end,
          entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
        )
      }
    }

    debug(`End of headers: ${end}.`)

    return end
  }

  // Returns the first sector after data.
  getEndOfData () {
    let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)

    const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
    const { maxTableEntries } = this.header
    for (let i = 0; i < maxTableEntries; i++) {
      const blockAddr = this._getBatEntry(i)

      if (blockAddr !== BLOCK_UNUSED) {
        end = Math.max(end, blockAddr + fullBlockSize)
      }
    }

    debug(`End of data: ${end}.`)

    return sectorsToBytes(end)
  }

  // TODO: extract the checks into reusable functions:
  // - better human reporting
  // - auto repair if possible
  //
  // Reads and validates both the footer and the header, then caches the
  // derived per-block sizes on the instance.
  async readHeaderAndFooter (checkSecondFooter = true) {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
    const bufFooter = buf.slice(0, FOOTER_SIZE)
    const bufHeader = buf.slice(FOOTER_SIZE)

    assertChecksum('footer', bufFooter, fuFooter)
    assertChecksum('header', bufHeader, fuHeader)

    if (checkSecondFooter) {
      // The footer is duplicated at the end of the file; both copies must
      // match.
      const size = await this._handler.getSize(this._path)
      assert(
        bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
        'footer1 !== footer2'
      )
    }

    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
    assert(footer.originalSize <= footer.currentSize)
    assert(
      footer.diskType === DISK_TYPE_DIFFERENCING ||
        footer.diskType === DISK_TYPE_DYNAMIC
    )

    const header = (this.header = fuHeader.unpack(bufHeader))
    assert.strictEqual(header.cookie, HEADER_COOKIE)
    assert.strictEqual(header.dataOffset, undefined)
    assert.strictEqual(header.headerVersion, HEADER_VERSION)
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
    // blockSize must be a power-of-two multiple of the sector size.
    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
    const sectorsPerBlock = (this.sectorsPerBlock =
      header.blockSize / SECTOR_SIZE)

    // Compute bitmap size in sectors.
    // Default: 1.
    const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
      sectorsPerBlock >> 3
    ))

    // Full block size => data block size + bitmap size.
    this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)

    // In bytes.
    // Default: 512.
    this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
  }

  // Returns a buffer that contains the block allocation table of a vhd file.
  async readBlockAllocationTable () {
    const { header } = this
    this.blockTable = await this._read(
      header.tableOffset,
      header.maxTableEntries * 4
    )
  }

  // return the first sector (bitmap) of a block
  _getBatEntry (block) {
    return this.blockTable.readUInt32BE(block * 4)
  }

  // Read block `blockId` (bitmap only, or bitmap + data).
  _readBlock (blockId, onlyBitmap = false) {
    const blockAddr = this._getBatEntry(blockId)
    if (blockAddr === BLOCK_UNUSED) {
      throw new Error(`no such block ${blockId}`)
    }

    return this._read(
      sectorsToBytes(blockAddr),
      onlyBitmap ? this.bitmapSize : this.fullBlockSize
    ).then(
      buf =>
        onlyBitmap
          ? { id: blockId, bitmap: buf }
          : {
              id: blockId,
              bitmap: buf.slice(0, this.bitmapSize),
              data: buf.slice(this.bitmapSize),
              buffer: buf,
            }
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  //
  _getFirstAndLastBlocks () {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
    let i = 0
    let j = 0
    let first, firstSector, last, lastSector

    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += 4

      if (i === n) {
        // `noBlock` lets callers distinguish this expected situation from
        // real failures (see _freeFirstBlockSpace).
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
    first = last = i

    while (i < n) {
      const sector = bat.readUInt32BE(j)
      if (sector !== BLOCK_UNUSED) {
        if (sector < firstSector) {
          first = i
          firstSector = sector
        } else if (sector > lastSector) {
          last = i
          lastSector = sector
        }
      }

      i += 1
      j += 4
    }

    return { first, firstSector, last, lastSector }
  }

  // =================================================================
  // Write functions.
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  async _write (data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    const stream = await this._handler.createOutputStream(this._path, {
      flags: 'r+',
      start: offset,
    })
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
  }

  // Relocate the first allocated block(s) to the end of the file until at
  // least `spaceNeededBytes` are free after the headers + BAT. No-op when
  // the file has no allocated block.
  async _freeFirstBlockSpace (spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(
        (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
      )
      if (
        tableOffset + batSize + spaceNeededBytes >=
        sectorsToBytes(firstSector)
      ) {
        const { fullBlockSize } = this
        const newFirstSector = Math.max(
          lastSector + fullBlockSize / SECTOR_SIZE,
          newMinSector
        )
        debug(
          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
        )
        // copy the first block at the end
        const stream = await this._readStream(
          sectorsToBytes(firstSector),
          fullBlockSize
        )
        await this._write(stream, sectorsToBytes(newFirstSector))
        await this._setBatEntry(first, newFirstSector)
        await this.writeFooter(true)
        spaceNeededBytes -= this.fullBlockSize
        if (spaceNeededBytes > 0) {
          return this._freeFirstBlockSpace(spaceNeededBytes)
        }
      }
    } catch (e) {
      // an empty file has no block to move: nothing to do
      if (!e.noBlock) {
        throw e
      }
    }
  }

  // Grow the BAT so it can hold at least `entries` entries, moving data
  // blocks out of the way if needed, and persist the new header.
  async ensureBatSize (entries) {
    const { header } = this
    const prevMaxTableEntries = header.maxTableEntries
    if (prevMaxTableEntries >= entries) {
      return
    }

    const newBatSize = computeBatSize(entries)
    await this._freeFirstBlockSpace(newBatSize - this.batSize)
    const maxTableEntries = (header.maxTableEntries = entries)
    const prevBat = this.blockTable
    const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
    prevBat.copy(bat)
    // mark every new entry as unused
    bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
    debug(
      `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
    )
    await this._write(
      constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
      header.tableOffset + prevBat.length
    )
    await this.writeHeader()
  }

  // set the first sector (bitmap) of a block
  _setBatEntry (block, blockSector) {
    const i = block * 4
    const { blockTable } = this

    blockTable.writeUInt32BE(blockSector, i)

    return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
  }

  // Make a new empty block at vhd end.
  // Update block allocation table in context and in file.
  async createBlock (blockId) {
    const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)

    debug(`create block ${blockId} at ${blockAddr}`)

    await Promise.all([
      // Write an empty block and addr in vhd file.
      this._write(
        constantStream([0], this.fullBlockSize),
        sectorsToBytes(blockAddr)
      ),
      this._setBatEntry(blockId, blockAddr),
    ])

    return blockAddr
  }

  // Write a bitmap at a block address.
  async writeBlockBitmap (blockAddr, bitmap) {
    const { bitmapSize } = this

    if (bitmap.length !== bitmapSize) {
      throw new Error(`Bitmap length is not correct ! ${bitmap.length}`)
    }

    const offset = sectorsToBytes(blockAddr)

    debug(
      `Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
        'hex'
      )})`
    )
    // reuse the offset computed above instead of recomputing it
    await this._write(bitmap, offset)
  }

  // Write a full block (bitmap + data) at its BAT address, allocating the
  // block first if necessary.
  async writeEntireBlock (block) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }
    await this._write(block.buffer, sectorsToBytes(blockAddr))
  }

  // Write sectors [beginSectorId, endSectorId) of `block.data` into this
  // file, updating the block bitmap accordingly.
  async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
      parentBitmap = Buffer.alloc(this.bitmapSize, 0)
    } else if (parentBitmap === undefined) {
      parentBitmap = (await this._readBlock(block.id, true)).bitmap
    }

    const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

    debug(
      `writeBlockSectors at ${offset} block=${
        block.id
      }, sectors=${beginSectorId}...${endSectorId}`
    )

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(parentBitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, parentBitmap)
    await this._write(
      block.data.slice(
        sectorsToBytes(beginSectorId),
        sectorsToBytes(endSectorId)
      ),
      sectorsToBytes(offset)
    )
  }

  // Copy the sectors marked dirty in `child`'s block `blockId` into this
  // (parent) file. Returns the block data length (merged size).
  async coalesceBlock (child, blockId) {
    const block = await child._readBlock(blockId)
    const { bitmap, data } = block

    debug(`coalesceBlock block=${blockId}`)

    // For each sector of block data...
    const { sectorsPerBlock } = child
    let parentBitmap = null
    for (let i = 0; i < sectorsPerBlock; i++) {
      // If no changes on one sector, skip.
      if (!mapTestBit(bitmap, i)) {
        continue
      }
      let endSector = i + 1

      // Count changed sectors.
      while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
        ++endSector
      }

      // Write n sectors into parent.
      debug(`coalesceBlock: write sectors=${i}...${endSector}`)

      const isFullBlock = i === 0 && endSector === sectorsPerBlock
      if (isFullBlock) {
        await this.writeEntireBlock(block)
      } else {
        // the parent bitmap is only needed (and read once) for partial
        // writes
        if (parentBitmap === null) {
          parentBitmap = (await this._readBlock(blockId, true)).bitmap
        }
        await this.writeBlockSectors(block, i, endSector, parentBitmap)
      }

      i = endSector
    }

    // Return the merged data size
    return data.length
  }

  // Write a context footer. (At the end and beginning of a vhd file.)
  async writeFooter (onlyEndFooter = false) {
    const { footer } = this

    const rawFooter = fuFooter.pack(footer)
    const eof = await this._handler.getSize(this._path)
    // sometimes the file is longer than anticipated, we still need to put the footer at the end
    const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(
      `Write footer at: ${offset} (checksum=${
        footer.checksum
      }). (data=${rawFooter.toString('hex')})`
    )
    if (!onlyEndFooter) {
      await this._write(rawFooter, 0)
    }
    await this._write(rawFooter, offset)
  }

  // Re-pack and persist the header (at its fixed offset after the footer).
  writeHeader () {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    const offset = FOOTER_SIZE
    debug(
      `Write header at: ${offset} (checksum=${
        header.checksum
      }). (data=${rawHeader.toString('hex')})`
    )
    return this._write(rawHeader, offset)
  }

  // Write `buffer` into the virtual disk starting at sector
  // `offsetSectors`, block by block, growing the BAT if needed.
  async writeData (offsetSectors, buffer) {
    const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
    const endBufferSectors = offsetSectors + bufferSizeSectors
    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
    // block ids are 0-indexed: the BAT needs `lastBlock + 1` entries for
    // `lastBlock` to be addressable (`ensureBatSize(lastBlock)` was off by
    // one and would overflow the BAT when writing block id
    // === maxTableEntries)
    await this.ensureBatSize(lastBlock + 1)
    const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock

    for (
      let currentBlock = startBlock;
      currentBlock <= lastBlock;
      currentBlock++
    ) {
      const offsetInBlockSectors = Math.max(
        0,
        offsetSectors - currentBlock * this.sectorsPerBlock
      )
      const endInBlockSectors = Math.min(
        endBufferSectors - currentBlock * this.sectorsPerBlock,
        this.sectorsPerBlock
      )
      const startInBuffer = Math.max(
        0,
        (currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
      )
      const endInBuffer = Math.min(
        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
          SECTOR_SIZE,
        buffer.length
      )
      let inputBuffer
      if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
        inputBuffer = buffer.slice(startInBuffer, endInBuffer)
      } else {
        // partial block: pad with zeros so writeBlockSectors can slice a
        // full-block-sized buffer
        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
        buffer.copy(
          inputBuffer,
          offsetInBlockSectors * SECTOR_SIZE,
          startInBuffer,
          endInBuffer
        )
      }
      await this.writeBlockSectors(
        { id: currentBlock, data: inputBuffer },
        offsetInBlockSectors,
        endInBlockSectors
      )
    }
    await this.writeFooter()
  }

  // Make room for `neededSectors` sectors of parent-locator data between
  // the header and the BAT, moving the BAT if necessary. Returns the
  // offset (in bytes) where locator data may be written.
  async ensureSpaceForParentLocators (neededSectors) {
    const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
    const currentSpace =
      Math.floor(this.header.tableOffset / SECTOR_SIZE) -
      firstLocatorOffset / SECTOR_SIZE
    if (currentSpace < neededSectors) {
      const deltaSectors = neededSectors - currentSpace
      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
      this.header.tableOffset += sectorsToBytes(deltaSectors)
      await this._write(this.blockTable, this.header.tableOffset)
    }
    return firstLocatorOffset
  }

  // Replace all parent locators with a single W2KU (UTF-16LE path) entry
  // pointing at `fileNameString`; the 7 other entries are cleared.
  async setUniqueParentLocator (fileNameString) {
    const { header } = this
    header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
    const encodedFilename = Buffer.from(fileNameString, 'utf16le')
    const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
    const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
    await this._write(encodedFilename, position)
    header.parentLocatorEntry[0].platformDataSpace =
      dataSpaceSectors * SECTOR_SIZE
    header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
    header.parentLocatorEntry[0].platformDataOffset = position
    for (let i = 1; i < 8; i++) {
      header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
      header.parentLocatorEntry[i].platformDataSpace = 0
      header.parentLocatorEntry[i].platformDataLength = 0
      header.parentLocatorEntry[i].platformDataOffset = 0
    }
  }
}

View File

@@ -29,13 +29,15 @@ exports.createOutputStream = path => {
exports.resolveRef = (xapi, type, refOrUuidOrNameLabel) =>
isOpaqueRef(refOrUuidOrNameLabel)
? refOrUuidOrNameLabel
: xapi.call(`${type}.get_by_uuid`, refOrUuidOrNameLabel).catch(
() => xapi.call(`${type}.get_by_name_label`, refOrUuidOrNameLabel).then(
refs => {
: xapi.call(`${type}.get_by_uuid`, refOrUuidOrNameLabel).catch(() =>
xapi
.call(`${type}.get_by_name_label`, refOrUuidOrNameLabel)
.then(refs => {
if (refs.length === 1) {
return refs[0]
}
throw new Error(`no single match for ${type} with name label ${refOrUuidOrNameLabel}`)
}
)
throw new Error(
`no single match for ${type} with name label ${refOrUuidOrNameLabel}`
)
})
)

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.16.4",
"version": "0.16.9",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [

View File

@@ -56,7 +56,7 @@ const main = async args => {
let auth
if (opts._.length > 1) {
const [ , user, password = await askPassword() ] = opts._
const [, user, password = await askPassword()] = opts._
auth = { user, password }
}
@@ -86,11 +86,11 @@ const main = async args => {
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {
fromCallback(cb => {
;fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value =>
isArray(value) ? Promise.all(value) : value
)::asCallback(cb)
})
.then(value => (isArray(value) ? Promise.all(value) : value))
::asCallback(cb)
})(repl.eval)
await eventToPromise(repl, 'exit')

View File

@@ -26,6 +26,8 @@ import {
delay as pDelay,
fromEvents,
lastly,
timeout as pTimeout,
TimeoutError,
} from 'promise-toolbox'
import autoTransport from './transports/auto'
@@ -34,6 +36,9 @@ const debug = createDebug('xen-api')
// ===================================================================
// in seconds
const EVENT_TIMEOUT = 60
// http://www.gnu.org/software/libc/manual/html_node/Error-Codes.html
const NETWORK_ERRORS = {
// Connection has been closed outside of our control.
@@ -51,11 +56,14 @@ const NETWORK_ERRORS = {
// Host is not reachable (does not respond).
EHOSTUNREACH: true,
// network is unreachable
ENETUNREACH: true,
// Connection configured timed out has been reach.
ETIMEDOUT: true,
}
const isNetworkError = ({code}) => NETWORK_ERRORS[code]
const isNetworkError = ({ code }) => NETWORK_ERRORS[code]
// -------------------------------------------------------------------
@@ -64,17 +72,17 @@ const XAPI_NETWORK_ERRORS = {
HOST_HAS_NO_MANAGEMENT_IP: true,
}
const isXapiNetworkError = ({code}) => XAPI_NETWORK_ERRORS[code]
const isXapiNetworkError = ({ code }) => XAPI_NETWORK_ERRORS[code]
// -------------------------------------------------------------------
const areEventsLost = ({code}) => code === 'EVENTS_LOST'
const areEventsLost = ({ code }) => code === 'EVENTS_LOST'
const isHostSlave = ({code}) => code === 'HOST_IS_SLAVE'
const isHostSlave = ({ code }) => code === 'HOST_IS_SLAVE'
const isMethodUnknown = ({code}) => code === 'MESSAGE_METHOD_UNKNOWN'
const isMethodUnknown = ({ code }) => code === 'MESSAGE_METHOD_UNKNOWN'
const isSessionInvalid = ({code}) => code === 'SESSION_INVALID'
const isSessionInvalid = ({ code }) => code === 'SESSION_INVALID'
// -------------------------------------------------------------------
@@ -93,8 +101,9 @@ class XapiError extends BaseError {
export const wrapError = error => {
let code, params
if (isArray(error)) { // < XenServer 7.3
[ code, ...params ] = error
if (isArray(error)) {
// < XenServer 7.3
;[code, ...params] = error
} else {
code = error.message
params = error.data
@@ -111,7 +120,7 @@ const parseUrl = url => {
throw new Error('invalid URL: ' + url)
}
const [ , protocol = 'https:', username, password, hostname, port ] = matches
const [, protocol = 'https:', username, password, hostname, port] = matches
return { protocol, username, password, hostname, port }
}
@@ -128,17 +137,15 @@ const {
const OPAQUE_REF_PREFIX = 'OpaqueRef:'
export const isOpaqueRef = value =>
typeof value === 'string' &&
startsWith(value, OPAQUE_REF_PREFIX)
typeof value === 'string' && startsWith(value, OPAQUE_REF_PREFIX)
// -------------------------------------------------------------------
const RE_READ_ONLY_METHOD = /^[^.]+\.get_/
const isReadOnlyCall = (method, args) => (
const isReadOnlyCall = (method, args) =>
args.length === 1 &&
isOpaqueRef(args[0]) &&
typeof args[0] === 'string' &&
RE_READ_ONLY_METHOD.test(method)
)
// Prepare values before passing them to the XenAPI:
//
@@ -175,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
// -------------------------------------------------------------------
const getTaskResult = (task, onSuccess, onFailure) => {
const getTaskResult = task => {
const { status } = task
if (status === 'cancelled') {
return [ onFailure(new Cancel('task canceled')) ]
return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
return [ onFailure(wrapError(task.error_info)) ]
return Promise.reject(wrapError(task.error_info))
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
return [ onSuccess(task.result) ]
return Promise.resolve(task.result)
}
}
@@ -209,7 +216,7 @@ export class Xapi extends EventEmitter {
this._pool = null
this._readOnly = Boolean(opts.readOnly)
this._sessionId = null
const url = this._url = parseUrl(opts.url)
const url = (this._url = parseUrl(opts.url))
if (this._auth === undefined) {
const user = url.username
@@ -224,9 +231,7 @@ export class Xapi extends EventEmitter {
}
if (opts.watchEvents !== false) {
this._debounce = opts.debounce == null
? 200
: opts.debounce
this._debounce = opts.debounce == null ? 200 : opts.debounce
this._eventWatchers = createObject(null)
@@ -237,11 +242,11 @@ export class Xapi extends EventEmitter {
this._nTasks = 0
const objects = this._objects = new Collection()
const objects = (this._objects = new Collection())
objects.getKey = getKey
this._objectsByRefs = createObject(null)
this._objectsByRefs['OpaqueRef:NULL'] = null
this._objectsByRefs['OpaqueRef:NULL'] = undefined
this._taskWatchers = Object.create(null)
@@ -286,13 +291,7 @@ export class Xapi extends EventEmitter {
get status () {
const id = this._sessionId
return id
? (
id === CONNECTING
? CONNECTING
: CONNECTED
)
: DISCONNECTED
return id ? (id === CONNECTING ? CONNECTING : CONNECTED) : DISCONNECTED
}
get _humanId () {
@@ -305,36 +304,46 @@ export class Xapi extends EventEmitter {
barrier (ref) {
const eventWatchers = this._eventWatchers
if (eventWatchers === undefined) {
return Promise.reject(new Error('Xapi#barrier() requires events watching'))
return Promise.reject(
new Error('Xapi#barrier() requires events watching')
)
}
const key = `xo:barrier:${Math.random().toString(36).slice(2)}`
const key = `xo:barrier:${Math.random()
.toString(36)
.slice(2)}`
const poolRef = this._pool.$ref
const { promise, resolve } = defer()
eventWatchers[key] = resolve
return this._sessionCall(
'pool.add_to_other_config',
[ poolRef, key, '' ]
).then(() => promise.then(() => {
this._sessionCall('pool.remove_from_other_config', [ poolRef, key ]).catch(noop)
return this._sessionCall('pool.add_to_other_config', [
poolRef,
key,
'',
]).then(() =>
promise.then(() => {
this._sessionCall('pool.remove_from_other_config', [
poolRef,
key,
]).catch(noop)
if (ref === undefined) {
return
}
if (ref === undefined) {
return
}
// support legacy params (type, ref)
if (arguments.length === 2) {
ref = arguments[1]
}
// support legacy params (type, ref)
if (arguments.length === 2) {
ref = arguments[1]
}
return this.getObjectByRef(ref)
}))
return this.getObjectByRef(ref)
})
)
}
connect () {
const {status} = this
const { status } = this
if (status === CONNECTED) {
return Promise.reject(new Error('already connected'))
@@ -378,7 +387,7 @@ export class Xapi extends EventEmitter {
return Promise.reject(new Error('already disconnected'))
}
this._transportCall('session.logout', [ this._sessionId ]).catch(noop)
this._transportCall('session.logout', [this._sessionId]).catch(noop)
this._sessionId = null
@@ -400,22 +409,22 @@ export class Xapi extends EventEmitter {
return this._readOnly && !isReadOnlyCall(method, args)
? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
: this._sessionCall(`Async.${method}`, args).then(taskRef => {
$cancelToken.promise.then(() => {
this._sessionCall('task.cancel', [taskRef]).catch(noop)
})
$cancelToken.promise.then(() => {
// TODO: do not trigger if the task is already over
this._sessionCall('task.cancel', [taskRef]).catch(noop)
})
return this.watchTask(taskRef)::lastly(() => {
this._sessionCall('task.destroy', [taskRef]).catch(noop)
return this.watchTask(taskRef)::lastly(() => {
this._sessionCall('task.destroy', [taskRef]).catch(noop)
})
})
})
}
// create a task and automatically destroy it when settled
//
// allowed even in read-only mode because it does not have impact on the
// XenServer and it's necessary for getResource()
createTask (nameLabel, nameDescription = '') {
if (this._readOnly) {
return Promise.reject(new Error('cannot create task in read only mode'))
}
const promise = this._sessionCall('task.create', [
nameLabel,
nameDescription,
@@ -434,19 +443,18 @@ export class Xapi extends EventEmitter {
// this lib), UUID (unique identifier that some objects have) or
// opaque reference (internal to XAPI).
getObject (idOrUuidOrRef, defaultValue) {
const object = typeof idOrUuidOrRef === 'string'
? (
// if there is an UUID, it is also the $id.
this._objects.all[idOrUuidOrRef] ||
this._objectsByRefs[idOrUuidOrRef]
)
: this._objects.all[idOrUuidOrRef.$id]
if (typeof idOrUuidOrRef === 'object') {
idOrUuidOrRef = idOrUuidOrRef.$id
}
if (object) return object
const object =
this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
if (object !== undefined) return object
if (arguments.length > 1) return defaultValue
throw new Error('there is not object can be matched to ' + idOrUuidOrRef)
throw new Error('no object with UUID or opaque ref: ' + idOrUuidOrRef)
}
// Returns the object for a given opaque reference (internal to
@@ -454,11 +462,11 @@ export class Xapi extends EventEmitter {
getObjectByRef (ref, defaultValue) {
const object = this._objectsByRefs[ref]
if (object) return object
if (object !== undefined) return object
if (arguments.length > 1) return defaultValue
throw new Error('there is no object with the ref ' + ref)
throw new Error('no object with opaque ref: ' + ref)
}
// Returns the object for a given UUID (unique identifier that some
@@ -471,7 +479,7 @@ export class Xapi extends EventEmitter {
if (arguments.length > 1) return defaultValue
throw new Error('there is no object with the UUID ' + uuid)
throw new Error('no object with UUID: ' + uuid)
}
getRecord (type, ref) {
@@ -479,158 +487,147 @@ export class Xapi extends EventEmitter {
}
@cancelable
getResource ($cancelToken, pathname, {
host,
query,
task,
}) {
return this._autoTask(
task,
`Xapi#getResource ${pathname}`
).then(taskRef => {
query = { ...query, session_id: this.sessionId }
let taskResult
if (taskRef !== undefined) {
query.task_id = taskRef
taskResult = this.watchTask(taskRef)
getResource ($cancelToken, pathname, { host, query, task }) {
return this._autoTask(task, `Xapi#getResource ${pathname}`).then(
taskRef => {
query = { ...query, session_id: this.sessionId }
let taskResult
if (taskRef !== undefined) {
query.task_id = taskRef
taskResult = this.watchTask(taskRef)
if (typeof $cancelToken.addHandler === 'function') {
$cancelToken.addHandler(() => taskResult)
if (typeof $cancelToken.addHandler === 'function') {
$cancelToken.addHandler(() => taskResult)
}
}
}
let promise = httpRequest(
$cancelToken,
this._url,
host && {
hostname: this.getObject(host).address,
},
{
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
}
)
if (taskResult !== undefined) {
promise = promise.then(response => {
response.task = taskResult
return response
})
}
return promise
})
}
@cancelable
putResource ($cancelToken, body, pathname, {
host,
query,
task,
} = {}) {
if (this._readOnly) {
return Promise.reject(new Error(new Error('cannot put resource in read only mode')))
}
return this._autoTask(
task,
`Xapi#putResource ${pathname}`
).then(taskRef => {
query = { ...query, session_id: this.sessionId }
let taskResult
if (taskRef !== undefined) {
query.task_id = taskRef
taskResult = this.watchTask(taskRef)
if (typeof $cancelToken.addHandler === 'function') {
$cancelToken.addHandler(() => taskResult)
}
}
const headers = {}
// Xen API does not support chunk encoding.
const isStream = typeof body.pipe === 'function'
const { length } = body
if (isStream && length === undefined) {
// add a fake huge content length (1 PiB)
headers['content-length'] = '1125899906842624'
}
const doRequest = override => httpRequest.put(
$cancelToken,
this._url,
host && {
hostname: this.getObject(host).address,
},
{
body,
headers,
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
},
override
)
const promise = isStream
// dummy request to probe for a redirection before consuming body
? doRequest({
body: '',
// omit task_id because this request will fail on purpose
query: 'task_id' in query
? omit(query, 'task_id')
: query,
maxRedirects: 0,
}).then(
response => {
response.req.abort()
return doRequest()
let promise = httpRequest(
$cancelToken,
this._url,
host && {
hostname: this.getObject(host).address,
},
error => {
let response
if (error != null && (response = error.response) != null) {
response.req.abort()
const { headers: { location }, statusCode } = response
if (statusCode === 302 && location !== undefined) {
return doRequest(location)
}
}
throw error
{
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
}
)
// http-request-plus correctly handle redirects if body is not a stream
: doRequest()
return promise.then(response => {
const { req } = response
if (taskResult !== undefined) {
taskResult = taskResult.catch(error => {
error.url = response.url
throw error
promise = promise.then(response => {
response.task = taskResult
return response
})
}
if (req.finished) {
req.abort()
return taskResult
return promise
}
)
}
@cancelable
putResource ($cancelToken, body, pathname, { host, query, task } = {}) {
if (this._readOnly) {
return Promise.reject(
new Error(new Error('cannot put resource in read only mode'))
)
}
return this._autoTask(task, `Xapi#putResource ${pathname}`).then(
taskRef => {
query = { ...query, session_id: this.sessionId }
let taskResult
if (taskRef !== undefined) {
query.task_id = taskRef
taskResult = this.watchTask(taskRef)
if (typeof $cancelToken.addHandler === 'function') {
$cancelToken.addHandler(() => taskResult)
}
}
return fromEvents(req, ['close', 'finish']).then(() => {
req.abort()
return taskResult
const headers = {}
// Xen API does not support chunk encoding.
const isStream = typeof body.pipe === 'function'
const { length } = body
if (isStream && length === undefined) {
// add a fake huge content length (1 PiB)
headers['content-length'] = '1125899906842624'
}
const doRequest = override =>
httpRequest.put(
$cancelToken,
this._url,
host && {
hostname: this.getObject(host).address,
},
{
body,
headers,
pathname,
query,
rejectUnauthorized: !this._allowUnauthorized,
},
override
)
// if a stream, sends a dummy request to probe for a
// redirection before consuming body
const promise = isStream
? doRequest({
body: '',
// omit task_id because this request will fail on purpose
query: 'task_id' in query ? omit(query, 'task_id') : query,
maxRedirects: 0,
}).then(
response => {
response.req.abort()
return doRequest()
},
error => {
let response
if (error != null && (response = error.response) != null) {
response.req.abort()
const { headers: { location }, statusCode } = response
if (statusCode === 302 && location !== undefined) {
return doRequest(location)
}
}
throw error
}
)
: doRequest()
return promise.then(response => {
const { req } = response
if (taskResult !== undefined) {
taskResult = taskResult.catch(error => {
error.url = response.url
throw error
})
}
if (req.finished) {
req.abort()
return taskResult
}
return fromEvents(req, ['close', 'finish']).then(() => {
req.abort()
return taskResult
})
})
})
})
}
)
}
watchTask (ref) {
@@ -645,11 +642,11 @@ export class Xapi extends EventEmitter {
let watcher = watchers[ref]
if (watcher === undefined) {
// sync check if the task is already settled
const task = this.objects.all[ref]
const task = this._objectsByRefs[ref]
if (task !== undefined) {
const result = getTaskResult(task, Promise.resolve, Promise.reject)
if (result) {
return result[0]
const result = getTaskResult(task)
if (result !== undefined) {
return result
}
}
@@ -692,22 +689,24 @@ export class Xapi extends EventEmitter {
newArgs.push.apply(newArgs, args)
}
return this._transportCall(method, newArgs)
::pCatch(isSessionInvalid, () => {
return this._transportCall(method, newArgs)::pCatch(
isSessionInvalid,
() => {
// XAPI is sometimes reinitialized and sessions are lost.
// Try to login again.
debug('%s: the session has been reinitialized', this._humanId)
this._sessionId = null
return this.connect().then(() => this._sessionCall(method, args))
})
}
)
} catch (error) {
return Promise.reject(error)
}
}
_addObject (type, ref, object) {
const {_objectsByRefs: objectsByRefs} = this
const { _objectsByRefs: objectsByRefs } = this
const reservedKeys = {
id: true,
@@ -715,9 +714,8 @@ export class Xapi extends EventEmitter {
ref: true,
type: true,
}
const getKey = (key, obj) => reservedKeys[key] && obj === object
? `$$${key}`
: `$${key}`
const getKey = (key, obj) =>
reservedKeys[key] && obj === object ? `$$${key}` : `$${key}`
// Creates resolved properties.
forEach(object, function resolveObject (value, key, object) {
@@ -736,7 +734,7 @@ export class Xapi extends EventEmitter {
} else if (isOpaqueRef(value[0])) {
// This is an array of refs.
defineProperty(object, getKey(key, object), {
get: () => freezeObject(map(value, (ref) => objectsByRefs[ref])),
get: () => freezeObject(map(value, ref => objectsByRefs[ref])),
})
freezeObject(value)
@@ -795,11 +793,12 @@ export class Xapi extends EventEmitter {
const taskWatchers = this._taskWatchers
const taskWatcher = taskWatchers[ref]
if (
taskWatcher !== undefined &&
getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
) {
delete taskWatchers[ref]
if (taskWatcher !== undefined) {
const result = getTaskResult(object)
if (result !== undefined) {
taskWatcher.resolve(result)
delete taskWatchers[ref]
}
}
}
}
@@ -819,7 +818,10 @@ export class Xapi extends EventEmitter {
const taskWatchers = this._taskWatchers
const taskWatcher = taskWatchers[ref]
if (taskWatcher !== undefined) {
taskWatcher.reject(new Error('task has been destroyed before completion'))
const error = new Error('task has been destroyed before completion')
error.task = object
error.taskRef = ref
taskWatcher.reject(error)
delete taskWatchers[ref]
}
}
@@ -836,40 +838,48 @@ export class Xapi extends EventEmitter {
}
_watchEvents () {
const loop = () => this.status === CONNECTED && this._sessionCall('event.from', [
['*'],
this._fromToken,
60 + 0.1, // Force float.
]).then(onSuccess, onFailure)
const loop = () =>
this.status === CONNECTED &&
this._sessionCall('event.from', [
['*'],
this._fromToken,
EVENT_TIMEOUT + 0.1, // Force float.
])
::pTimeout(EVENT_TIMEOUT * 1.1e3) // 10% longer than the XenAPI timeout
.then(onSuccess, onFailure)
const onSuccess = ({ events, token, valid_ref_counts: { task } }) => {
this._fromToken = token
this._processEvents(events)
if (task !== this._nTasks) {
this._sessionCall('task.get_all_records').then(tasks => {
const toRemove = new Set()
forEach(this.objects.all, object => {
if (object.$type === 'task') {
toRemove.add(object.$ref)
}
this._sessionCall('task.get_all_records')
.then(tasks => {
const toRemove = new Set()
forEach(this.objects.all, object => {
if (object.$type === 'task') {
toRemove.add(object.$ref)
}
})
forEach(tasks, (task, ref) => {
toRemove.delete(ref)
this._addObject('task', ref, task)
})
toRemove.forEach(ref => {
this._removeObject('task', ref)
})
})
forEach(tasks, (task, ref) => {
toRemove.delete(ref)
this._addObject('task', ref, task)
})
toRemove.forEach(ref => {
this._removeObject('task', ref)
})
}).catch(noop)
.catch(noop)
}
const debounce = this._debounce
return debounce != null
? pDelay(debounce).then(loop)
: loop()
return debounce != null ? pDelay(debounce).then(loop) : loop()
}
const onFailure = error => {
if (error instanceof TimeoutError) {
return loop()
}
if (areEventsLost(error)) {
this._fromToken = ''
this._objects.clear()
@@ -906,41 +916,43 @@ export class Xapi extends EventEmitter {
::/\.get_all_records$/.test
)
return Promise.all(map(
getAllRecordsMethods,
method => this._sessionCall(method).then(
objects => {
const type = method.slice(0, method.indexOf('.')).toLowerCase()
forEach(objects, (object, ref) => {
this._addObject(type, ref, object)
})
},
error => {
if (error.code !== 'MESSAGE_REMOVED') {
throw error
return Promise.all(
map(getAllRecordsMethods, method =>
this._sessionCall(method).then(
objects => {
const type = method.slice(0, method.indexOf('.')).toLowerCase()
forEach(objects, (object, ref) => {
this._addObject(type, ref, object)
})
},
error => {
if (error.code !== 'MESSAGE_REMOVED') {
throw error
}
}
}
)
)
))
)
})
}
const watchEvents = () => this._sessionCall('event.register', [ ['*'] ]).then(loop)
const watchEvents = () =>
this._sessionCall('event.register', [['*']]).then(loop)
const loop = () => this.status === CONNECTED && this._sessionCall('event.next').then(onSuccess, onFailure)
const loop = () =>
this.status === CONNECTED &&
this._sessionCall('event.next').then(onSuccess, onFailure)
const onSuccess = events => {
this._processEvents(events)
const debounce = this._debounce
return debounce == null
? loop()
: pDelay(debounce).then(loop)
return debounce == null ? loop() : pDelay(debounce).then(loop)
}
const onFailure = error => {
if (areEventsLost(error)) {
return this._sessionCall('event.unregister', [ ['*'] ]).then(watchEvents)
return this._sessionCall('event.unregister', [['*']]).then(watchEvents)
}
throw error
@@ -950,85 +962,106 @@ export class Xapi extends EventEmitter {
}
}
Xapi.prototype._transportCall = reduce([
function (method, args) {
return this._call(method, args).catch(error => {
if (!(error instanceof Error)) {
error = wrapError(error)
}
error.method = method
throw error
})
},
call => function () {
let iterator // lazily created
const loop = () => call.apply(this, arguments)
::pCatch(isNetworkError, isXapiNetworkError, error => {
if (iterator === undefined) {
iterator = fibonacci().clamp(undefined, 60).take(10).toMs()
Xapi.prototype._transportCall = reduce(
[
function (method, args) {
return this._call(method, args).catch(error => {
if (!(error instanceof Error)) {
error = wrapError(error)
}
const cursor = iterator.next()
if (!cursor.done) {
// TODO: ability to cancel the connection
// TODO: ability to force immediate reconnection
const delay = cursor.value
debug('%s: network error %s, next try in %s ms', this._humanId, error.code, delay)
return pDelay(delay).then(loop)
}
debug('%s: network error %s, aborting', this._humanId, error.code)
// mark as disconnected
this.disconnect()::pCatch(noop)
error.method = method
throw error
})
return loop()
},
call => function loop () {
return call.apply(this, arguments)
::pCatch(isHostSlave, ({params: [master]}) => {
debug('%s: host is slave, attempting to connect at %s', this._humanId, master)
},
call =>
function () {
let iterator // lazily created
const loop = () =>
call
.apply(this, arguments)
::pCatch(isNetworkError, isXapiNetworkError, error => {
if (iterator === undefined) {
iterator = fibonacci()
.clamp(undefined, 60)
.take(10)
.toMs()
}
const newUrl = {
...this._url,
hostname: master,
}
this.emit('redirect', newUrl)
this._url = newUrl
const cursor = iterator.next()
if (!cursor.done) {
// TODO: ability to cancel the connection
// TODO: ability to force immediate reconnection
return loop.apply(this, arguments)
})
},
call => function (method) {
const startTime = Date.now()
return call.apply(this, arguments).then(
result => {
debug(
'%s: %s(...) [%s] ==> %s',
this._humanId,
method,
ms(Date.now() - startTime),
kindOf(result)
)
return result
const delay = cursor.value
debug(
'%s: network error %s, next try in %s ms',
this._humanId,
error.code,
delay
)
return pDelay(delay).then(loop)
}
debug('%s: network error %s, aborting', this._humanId, error.code)
// mark as disconnected
this.disconnect()::pCatch(noop)
throw error
})
return loop()
},
error => {
debug(
'%s: %s(...) [%s] =!> %s',
this._humanId,
method,
ms(Date.now() - startTime),
error
call =>
function loop () {
return call
.apply(this, arguments)
::pCatch(isHostSlave, ({ params: [master] }) => {
debug(
'%s: host is slave, attempting to connect at %s',
this._humanId,
master
)
const newUrl = {
...this._url,
hostname: master,
}
this.emit('redirect', newUrl)
this._url = newUrl
return loop.apply(this, arguments)
})
},
call =>
function (method) {
const startTime = Date.now()
return call.apply(this, arguments).then(
result => {
debug(
'%s: %s(...) [%s] ==> %s',
this._humanId,
method,
ms(Date.now() - startTime),
kindOf(result)
)
return result
},
error => {
debug(
'%s: %s(...) [%s] =!> %s',
this._humanId,
method,
ms(Date.now() - startTime),
error
)
throw error
}
)
throw error
}
)
},
], (call, decorator) => decorator(call))
},
],
(call, decorator) => decorator(call)
)
// ===================================================================

View File

@@ -5,7 +5,7 @@ import { delay as pDelay } from 'promise-toolbox'
import { createClient } from './'
const xapi = (() => {
const [ , , url, user, password ] = process.argv
const [, , url, user, password] = process.argv
return createClient({
auth: { user, password },
@@ -14,16 +14,19 @@ const xapi = (() => {
})
})()
xapi.connect()
xapi
.connect()
// Get the pool record's ref.
.then(() => xapi.call('pool.get_all'))
// Injects lots of events.
.then(([ poolRef ]) => {
const loop = () => xapi.call('event.inject', 'pool', poolRef)
::pDelay(10) // A small delay is required to avoid overloading the Xen API.
.then(loop)
.then(([poolRef]) => {
const loop = () =>
xapi
.call('event.inject', 'pool', poolRef)
::pDelay(10) // A small delay is required to avoid overloading the Xen API.
.then(loop)
return loop()
})

View File

@@ -14,7 +14,7 @@ setInterval(() => {
)
}, 1e2)
const [ , , url, user, password ] = process.argv
const [, , url, user, password] = process.argv
createClient({
auth: { user, password },
readOnly: true,

View File

@@ -3,7 +3,7 @@ import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'
const factories = [ jsonRpc, xmlRpcJson, xmlRpc ]
const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories
export default opts => {
@@ -14,18 +14,18 @@ export default opts => {
const current = factories[i++](opts)
if (i < length) {
const currentI = i
call = (method, args) => current(method, args).catch(
error => {
call = (method, args) =>
current(method, args).catch(error => {
if (error instanceof UnsupportedTransport) {
if (currentI === i) { // not changed yet
if (currentI === i) {
// not changed yet
create()
}
return call(method, args)
}
throw error
}
)
})
} else {
call = current
}

View File

@@ -4,35 +4,40 @@ import { format, parse } from 'json-rpc-protocol'
import { UnsupportedTransport } from './_utils'
export default ({ allowUnauthorized, url }) => {
return (method, args) => httpRequestPlus.post(url, {
rejectUnauthorized: !allowUnauthorized,
body: format.request(0, method, args),
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
},
path: '/jsonrpc',
}).readAll('utf8').then(
text => {
let response
try {
response = parse(text)
} catch (error) {
throw new UnsupportedTransport()
}
return (method, args) =>
httpRequestPlus
.post(url, {
rejectUnauthorized: !allowUnauthorized,
body: format.request(0, method, args),
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
},
path: '/jsonrpc',
})
.readAll('utf8')
.then(
text => {
let response
try {
response = parse(text)
} catch (error) {
throw new UnsupportedTransport()
}
if (response.type === 'response') {
return response.result
}
if (response.type === 'response') {
return response.result
}
throw response.error
},
error => {
if (error.response !== undefined) { // HTTP error
throw new UnsupportedTransport()
}
throw response.error
},
error => {
if (error.response !== undefined) {
// HTTP error
throw new UnsupportedTransport()
}
throw error
}
)
throw error
}
)
}

View File

@@ -20,10 +20,7 @@ const SPECIAL_CHARS = {
'\r': '\\r',
'\t': '\\t',
}
const SPECIAL_CHARS_RE = new RegExp(
Object.keys(SPECIAL_CHARS).join('|'),
'g'
)
const SPECIAL_CHARS_RE = new RegExp(Object.keys(SPECIAL_CHARS).join('|'), 'g')
const parseResult = result => {
const status = result.Status
@@ -78,11 +75,7 @@ export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
const client = (
protocol === 'https:'
? createSecureClient
: createClient
)({
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
path: '/json',
port,
@@ -90,8 +83,5 @@ export default ({
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(
parseResult,
logError
)
return (method, args) => call(method, args).then(parseResult, logError)
}

View File

@@ -34,19 +34,12 @@ export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
const client = (
protocol === 'https:'
? createSecureClient
: createClient
)({
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
port,
rejectUnauthorized: !allowUnauthorized,
})
const call = promisify(client.methodCall, client)
return (method, args) => call(method, args).then(
parseResult,
logError
)
return (method, args) => call(method, args).then(parseResult, logError)
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-acl-resolver",
"version": "0.2.3",
"version": "0.2.4",
"license": "ISC",
"description": "Xen-Orchestra internal: do ACLs resolution",
"keywords": [],

View File

@@ -8,7 +8,8 @@ let getObject
const authorized = () => true // eslint-disable-line no-unused-vars
const forbiddden = () => false // eslint-disable-line no-unused-vars
const and = (...checkers) => (object, permission) => { // eslint-disable-line no-unused-vars
// eslint-disable-next-line no-unused-vars
const and = (...checkers) => (object, permission) => {
for (const checker of checkers) {
if (!checker(object, permission)) {
return false
@@ -17,7 +18,8 @@ const and = (...checkers) => (object, permission) => { // eslint-disable-line no
return true
}
const or = (...checkers) => (object, permission) => { // eslint-disable-line no-unused-vars
// eslint-disable-next-line no-unused-vars
const or = (...checkers) => (object, permission) => {
for (const checker of checkers) {
if (checker(object, permission)) {
return true
@@ -28,7 +30,7 @@ const or = (...checkers) => (object, permission) => { // eslint-disable-line no-
// -------------------------------------------------------------------
const checkMember = (memberName) => (object, permission) => {
const checkMember = memberName => (object, permission) => {
const member = object[memberName]
return member !== object.id && checkAuthorization(member, permission)
}
@@ -36,10 +38,7 @@ const checkMember = (memberName) => (object, permission) => {
const checkSelf = ({ id }, permission) => {
const permissionsForObject = permissionsByObject[id]
return (
permissionsForObject &&
permissionsForObject[permission]
)
return permissionsForObject && permissionsForObject[permission]
}
// ===================================================================
@@ -51,7 +50,9 @@ const checkAuthorizationByTypes = {
network: or(checkSelf, checkMember('$pool')),
SR: or(checkSelf, checkMember('$pool')),
PIF: checkMember('$host'),
SR: or(checkSelf, checkMember('$container')),
task: checkMember('$host'),
@@ -102,12 +103,7 @@ function checkAuthorization (objectId, permission) {
// -------------------------------------------------------------------
export default (
permissionsByObject_,
getObject_,
permissions,
permission
) => {
export default (permissionsByObject_, getObject_, permissions, permission) => {
// Assign global variables.
permissionsByObject = permissionsByObject_
getObject = getObject_

View File

@@ -1,25 +1,3 @@
const { NODE_ENV = 'development' } = process.env
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
module.exports = {
comments: !__PROD__,
compact: __PROD__,
ignore: __TEST__ ? undefined : [ /\.spec\.js$/ ],
plugins: ['lodash'],
presets: [
[
'@babel/env',
{
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? { node: '6' }
: { node: 'current' },
useBuiltIns: 'usage',
},
],
'@babel/flow',
],
}
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -105,6 +105,12 @@ encoding by prefixing with `json:`:
> xo-cli foo.bar baz='json:[1, 2, 3]'
```
##### Configuration export
```
> xo-cli xo.exportConfig @=config.json
```
##### VM export
```

View File

@@ -28,7 +28,7 @@
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "7.0.0-beta.39",
"@babel/polyfill": "7.0.0-beta.49",
"bluebird": "^3.5.1",
"chalk": "^2.2.0",
"event-to-promise": "^0.8.0",
@@ -49,10 +49,10 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.39",
"@babel/core": "7.0.0-beta.39",
"@babel/preset-env": "7.0.0-beta.39",
"@babel/preset-flow": "7.0.0-beta.39",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
@@ -62,7 +62,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"pretest": "flow status"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -19,11 +19,13 @@ const configFile = configPath + '/config.json'
// ===================================================================
const load = exports.load = function () {
return readFile(configFile).then(JSON.parse).catch(function () {
return {}
})
}
const load = (exports.load = function () {
return readFile(configFile)
.then(JSON.parse)
.catch(function () {
return {}
})
})
exports.get = function (path) {
return load().then(function (config) {
@@ -31,11 +33,11 @@ exports.get = function (path) {
})
}
const save = exports.save = function (config) {
const save = (exports.save = function (config) {
return mkdirp(configPath).then(function () {
return writeFile(configFile, JSON.stringify(config))
})
}
})
exports.set = function (data) {
return load().then(function (config) {

View File

@@ -108,14 +108,16 @@ const humanFormatOpts = {
function printProgress (progress) {
if (progress.length) {
console.warn('%s% of %s @ %s/s - ETA %s',
console.warn(
'%s% of %s @ %s/s - ETA %s',
Math.round(progress.percentage),
humanFormat(progress.length, humanFormatOpts),
humanFormat(progress.speed, humanFormatOpts),
prettyMs(progress.eta * 1e3)
)
} else {
console.warn('%s @ %s/s',
console.warn(
'%s @ %s/s',
humanFormat(progress.transferred, humanFormatOpts),
humanFormat(progress.speed, humanFormatOpts)
)
@@ -130,8 +132,10 @@ function wrap (val) {
// ===================================================================
const help = wrap((function (pkg) {
return require('strip-indent')(`
const help = wrap(
(function (pkg) {
return require('strip-indent')(
`
Usage:
$name --register [--expiresIn duration] <XO-Server URL> <username> [<password>]
@@ -162,18 +166,20 @@ const help = wrap((function (pkg) {
Executes a command on the current XO instance.
$name v$version
`).replace(/<([^>]+)>|\$(\w+)/g, function (_, arg, key) {
if (arg) {
return '<' + chalk.yellow(arg) + '>'
}
`
).replace(/<([^>]+)>|\$(\w+)/g, function (_, arg, key) {
if (arg) {
return '<' + chalk.yellow(arg) + '>'
}
if (key === 'name') {
return chalk.bold(pkg[key])
}
if (key === 'name') {
return chalk.bold(pkg[key])
}
return pkg[key]
})
})(require('../package')))
return pkg[key]
})
})(require('../package'))
)
// -------------------------------------------------------------------
@@ -230,10 +236,7 @@ async function register (args) {
exports.register = register
function unregister () {
return config.unset([
'server',
'token',
])
return config.unset(['server', 'token'])
}
exports.unregister = unregister
@@ -284,11 +287,7 @@ async function listCommands (args) {
str.push(
name,
'=<',
type == null
? 'unknown type'
: isArray(type)
? type.join('|')
: type,
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
'>'
)
@@ -329,6 +328,15 @@ async function listObjects (args) {
}
exports.listObjects = listObjects
function ensurePathParam (method, value) {
if (typeof value !== 'string') {
const error =
method +
' requires the @ parameter to be a path (e.g. @=/tmp/config.json)'
throw error
}
}
async function call (args) {
if (!args.length) {
throw new Error('missing command name')
@@ -347,31 +355,33 @@ async function call (args) {
const result = await xo.call(method, params)
let keys, key, url
if (
isObject(result) &&
(keys = getKeys(result)).length === 1
) {
if (isObject(result) && (keys = getKeys(result)).length === 1) {
key = keys[0]
if (key === '$getFrom') {
ensurePathParam(method, file)
url = resolveUrl(baseUrl, result[key])
const output = createWriteStream(file)
const progress = progressStream({ time: 1e3 }, printProgress)
return eventToPromise(nicePipe([
got.stream(url).on('response', function (response) {
const length = response.headers['content-length']
if (length !== undefined) {
progress.length(length)
}
}),
progress,
output,
]), 'finish')
return eventToPromise(
nicePipe([
got.stream(url).on('response', function (response) {
const length = response.headers['content-length']
if (length !== undefined) {
progress.length(length)
}
}),
progress,
output,
]),
'finish'
)
}
if (key === '$sendTo') {
ensurePathParam(method, file)
url = resolveUrl(baseUrl, result[key])
const stats = await stat(file)
@@ -379,10 +389,13 @@ async function call (args) {
const input = nicePipe([
createReadStream(file),
progressStream({
length: length,
time: 1e3,
}, printProgress),
progressStream(
{
length: length,
time: 1e3,
},
printProgress
),
])
const response = await got.post(url, {

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -25,17 +25,16 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.18.0",
"@babel/runtime": "^7.0.0-beta.49",
"kindof": "^2.0.0",
"lodash": "^4.17.2",
"make-error": "^1.0.2"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"rimraf": "^2.6.1"
@@ -46,22 +45,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"babel": {
"plugins": [
"lodash",
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
],
"stage-3"
]
}
}

View File

@@ -1,17 +1,14 @@
import kindOf from 'kindof'
import {BaseError} from 'make-error'
import {EventEmitter} from 'events'
import {forEach} from 'lodash'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { forEach } from 'lodash'
import isEmpty from './is-empty'
import isObject from './is-object'
// ===================================================================
const {
create: createObject,
prototype: { hasOwnProperty },
} = Object
const { create: createObject, prototype: { hasOwnProperty } } = Object
export const ACTION_ADD = 'add'
export const ACTION_UPDATE = 'update'
@@ -189,7 +186,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
createIndex (name, index) {
const {_indexes: indexes} = this
const { _indexes: indexes } = this
if (hasOwnProperty.call(indexes, name)) {
throw new DuplicateIndex(name)
}
@@ -201,7 +198,7 @@ export default class Collection extends EventEmitter {
}
deleteIndex (name) {
const {_indexes: indexes} = this
const { _indexes: indexes } = this
if (!hasOwnProperty.call(indexes, name)) {
throw new NoSuchIndex(name)
}
@@ -218,7 +215,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------
* [Symbol.iterator] () {
const {_items: items} = this
const { _items: items } = this
for (const key in items) {
yield [key, items[key]]
@@ -226,7 +223,7 @@ export default class Collection extends EventEmitter {
}
* keys () {
const {_items: items} = this
const { _items: items } = this
for (const key in items) {
yield key
@@ -234,7 +231,7 @@ export default class Collection extends EventEmitter {
}
* values () {
const {_items: items} = this
const { _items: items } = this
for (const key in items) {
yield items[key]
@@ -259,7 +256,7 @@ export default class Collection extends EventEmitter {
return
}
const {_buffer: buffer} = this
const { _buffer: buffer } = this
// Due to deduplication there could be nothing in the buffer.
if (isEmpty(buffer)) {
@@ -354,7 +351,8 @@ export default class Collection extends EventEmitter {
} else {
this._buffer[key] = ACTION_REMOVE
}
} else { // update
} else {
// update
if (!this._buffer[key]) {
this._buffer[key] = ACTION_UPDATE
}

View File

@@ -3,15 +3,15 @@
import eventToPromise from 'event-to-promise'
import { forEach } from 'lodash'
import Collection, {DuplicateItem, NoSuchItem} from './collection'
import Collection, { DuplicateItem, NoSuchItem } from './collection'
// ===================================================================
function waitTicks (n = 2) {
const {nextTick} = process
const { nextTick } = process
return new Promise(function (resolve) {
(function waitNextTick () {
;(function waitNextTick () {
// The first tick is handled by Promise#then()
if (--n) {
nextTick(waitNextTick)
@@ -34,16 +34,16 @@ describe('Collection', function () {
it('is iterable', function () {
const iterator = col[Symbol.iterator]()
expect(iterator.next()).toEqual({done: false, value: ['bar', 0]})
expect(iterator.next()).toEqual({done: true, value: undefined})
expect(iterator.next()).toEqual({ done: false, value: ['bar', 0] })
expect(iterator.next()).toEqual({ done: true, value: undefined })
})
describe('#keys()', function () {
it('returns an iterator over the keys', function () {
const iterator = col.keys()
expect(iterator.next()).toEqual({done: false, value: 'bar'})
expect(iterator.next()).toEqual({done: true, value: undefined})
expect(iterator.next()).toEqual({ done: false, value: 'bar' })
expect(iterator.next()).toEqual({ done: true, value: undefined })
})
})
@@ -51,8 +51,8 @@ describe('Collection', function () {
it('returns an iterator over the values', function () {
const iterator = col.values()
expect(iterator.next()).toEqual({done: false, value: 0})
expect(iterator.next()).toEqual({done: true, value: undefined})
expect(iterator.next()).toEqual({ done: false, value: 0 })
expect(iterator.next()).toEqual({ done: true, value: undefined })
})
})
@@ -70,7 +70,7 @@ describe('Collection', function () {
// Async event.
return eventToPromise(col, 'add').then(function (added) {
expect(Object.keys(added)).toEqual([ 'foo' ])
expect(Object.keys(added)).toEqual(['foo'])
expect(added.foo).toBe(true)
})
})
@@ -216,7 +216,7 @@ describe('Collection', function () {
})
it('accepts an object with an id property', function () {
col.unset({id: 'bar'})
col.unset({ id: 'bar' })
expect(col.has('bar')).toBe(false)
@@ -235,7 +235,7 @@ describe('Collection', function () {
return waitTicks().then(() => {
col.touch(foo)
return eventToPromise(col, 'update', (items) => {
return eventToPromise(col, 'update', items => {
expect(Object.keys(items)).toEqual(['foo'])
expect(items.foo).toBe(foo)
})
@@ -249,7 +249,7 @@ describe('Collection', function () {
expect(col.size).toBe(0)
return eventToPromise(col, 'remove').then((items) => {
return eventToPromise(col, 'remove').then(items => {
expect(Object.keys(items)).toEqual(['bar'])
expect(items.bar).toBeUndefined()
})
@@ -257,84 +257,69 @@ describe('Collection', function () {
})
describe('deduplicates events', function () {
forEach({
'add & update → add': [
[
['add', 'foo', 0],
['update', 'foo', 1],
],
{
add: {
foo: 1,
forEach(
{
'add & update → add': [
[['add', 'foo', 0], ['update', 'foo', 1]],
{
add: {
foo: 1,
},
},
},
],
'add & remove → ∅': [
[
['add', 'foo', 0],
['remove', 'foo'],
],
{},
],
'update & update → update': [
[
['update', 'bar', 1],
['update', 'bar', 2],
],
{
update: {
bar: 2,
'add & remove → ∅': [[['add', 'foo', 0], ['remove', 'foo']], {}],
'update & update → update': [
[['update', 'bar', 1], ['update', 'bar', 2]],
{
update: {
bar: 2,
},
},
},
],
'update & remove → remove': [
[
['update', 'bar', 1],
['remove', 'bar'],
],
{
remove: {
bar: undefined,
},
},
],
'remove & add → update': [
[
['remove', 'bar'],
['add', 'bar', 0],
'update & remove → remove': [
[['update', 'bar', 1], ['remove', 'bar']],
{
remove: {
bar: undefined,
},
},
],
{
update: {
bar: 0,
'remove & add → update': [
[['remove', 'bar'], ['add', 'bar', 0]],
{
update: {
bar: 0,
},
},
},
],
}, ([operations, results], label) => {
it(label, function () {
forEach(operations, ([method, ...args]) => {
col[method](...args)
})
],
},
([operations, results], label) => {
it(label, function () {
forEach(operations, ([method, ...args]) => {
col[method](...args)
})
const spies = Object.create(null)
forEach(['add', 'update', 'remove'], event => {
col.on(event, (spies[event] = jest.fn()))
})
const spies = Object.create(null)
forEach(['add', 'update', 'remove'], event => {
col.on(event, (spies[event] = jest.fn()))
})
return waitTicks().then(() => {
forEach(spies, (spy, event) => {
const items = results[event]
if (items) {
expect(spy.mock.calls).toEqual([ [ items ] ])
} else {
expect(spy).not.toHaveBeenCalled()
}
return waitTicks().then(() => {
forEach(spies, (spy, event) => {
const items = results[event]
if (items) {
expect(spy.mock.calls).toEqual([[items]])
} else {
expect(spy).not.toHaveBeenCalled()
}
})
})
})
})
})
}
)
})
})

View File

@@ -3,11 +3,7 @@ import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import isEmpty from './is-empty'
import NotImplemented from './not-implemented'
import {
ACTION_ADD,
ACTION_UPDATE,
ACTION_REMOVE,
} from './collection'
import { ACTION_ADD, ACTION_UPDATE, ACTION_REMOVE } from './collection'
// ===================================================================
@@ -34,7 +30,7 @@ export default class Index {
// Remove empty items lists.
sweep () {
const {_itemsByHash: itemsByHash} = this
const { _itemsByHash: itemsByHash } = this
for (const hash in itemsByHash) {
if (isEmpty(itemsByHash[hash])) {
delete itemsByHash[hash]
@@ -86,14 +82,11 @@ export default class Index {
const hash = computeHash(value, key)
if (hash != null) {
(
itemsByHash[hash] ||
;(itemsByHash[hash] ||
// FIXME: We do not use objects without prototype for now
// because it breaks Angular in xo-web, change it back when
// this is fixed.
(itemsByHash[hash] = {})
)[key] = value
(itemsByHash[hash] = {}))[key] = value
keysToHash[key] = hash
}
@@ -118,12 +111,9 @@ export default class Index {
// Inserts item into the new hash's list if any.
if (hash != null) {
(
itemsByHash[hash] ||
;(itemsByHash[hash] ||
// FIXME: idem: change back to Object.create(null)
(itemsByHash[hash] = {})
)[key] = value
(itemsByHash[hash] = {}))[key] = value
keysToHash[key] = hash
} else {
@@ -133,10 +123,7 @@ export default class Index {
}
_onRemove (items) {
const {
_itemsByHash: itemsByHash,
_keysToHash: keysToHash,
} = this
const { _itemsByHash: itemsByHash, _keysToHash: keysToHash } = this
for (const key in items) {
const prev = keysToHash[key]

View File

@@ -9,10 +9,10 @@ import Index from './index'
// ===================================================================
const waitTicks = (n = 2) => {
const {nextTick} = process
const { nextTick } = process
return new Promise(resolve => {
(function waitNextTick () {
;(function waitNextTick () {
// The first tick is handled by Promise#then()
if (--n) {
nextTick(waitNextTick)

View File

@@ -1,3 +1,3 @@
export default function isObject (value) {
return (value !== null) && (typeof value === 'object')
return value !== null && typeof value === 'object'
}

View File

@@ -1,4 +1,4 @@
import {BaseError} from 'make-error'
import { BaseError } from 'make-error'
export default class NotImplemented extends BaseError {
constructor (message) {

View File

@@ -2,11 +2,7 @@ import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import NotImplemented from './not-implemented'
import {
ACTION_ADD,
ACTION_UPDATE,
ACTION_REMOVE,
} from './collection'
import { ACTION_ADD, ACTION_UPDATE, ACTION_REMOVE } from './collection'
// ===================================================================
@@ -108,10 +104,7 @@ export default class UniqueIndex {
}
_onRemove (items) {
const {
_itemByHash: itemByHash,
_keysToHash: keysToHash,
} = this
const { _itemByHash: itemByHash, _keysToHash: keysToHash } = this
for (const key in items) {
const prev = keysToHash[key]

View File

@@ -9,10 +9,10 @@ import Index from './unique-index'
// ===================================================================
const waitTicks = (n = 2) => {
const {nextTick} = process
const { nextTick } = process
return new Promise(resolve => {
(function waitNextTick () {
;(function waitNextTick () {
// The first tick is handled by Promise#then()
if (--n) {
nextTick(waitNextTick)

View File

@@ -7,7 +7,7 @@ import View from './view'
// Create the collection.
const users = new Collection()
users.getKey = (user) => user.name
users.getKey = user => user.name
// Inserts some data.
users.add({

View File

@@ -54,7 +54,7 @@ export default class View extends Collection {
}
_onAdd (items) {
const {_predicate: predicate} = this
const { _predicate: predicate } = this
forEach(items, (value, key) => {
if (predicate(value, key, this)) {
@@ -67,7 +67,7 @@ export default class View extends Collection {
}
_onUpdate (items) {
const {_predicate: predicate} = this
const { _predicate: predicate } = this
forEach(items, (value, key) => {
if (predicate(value, key, this)) {

View File

@@ -10,36 +10,53 @@ const xo = new Xo({
url: 'localhost:9000',
})
xo.open().then(function () {
return xo.call('acl.get', {}).then(function (result) {
console.log('success:', result)
}).catch(function (error) {
console.log('failure:', error)
xo
.open()
.then(function () {
return xo
.call('acl.get', {})
.then(function (result) {
console.log('success:', result)
})
.catch(function (error) {
console.log('failure:', error)
})
})
}).then(function () {
return xo.signIn({
email: 'admin@admin.net',
password: 'admin',
}).then(function () {
console.log('connected as ', xo.user)
}).catch(function (error) {
console.log('failure:', error)
.then(function () {
return xo
.signIn({
email: 'admin@admin.net',
password: 'admin',
})
.then(function () {
console.log('connected as ', xo.user)
})
.catch(function (error) {
console.log('failure:', error)
})
})
}).then(function () {
return xo.signIn({
email: 'tom',
password: 'tom',
}).then(function () {
console.log('connected as', xo.user)
.then(function () {
return xo
.signIn({
email: 'tom',
password: 'tom',
})
.then(function () {
console.log('connected as', xo.user)
return xo.call('acl.get', {}).then(function (result) {
console.log('success:', result)
}).catch(function (error) {
console.log('failure:', error)
})
}).catch(function (error) {
console.log('failure', error)
return xo
.call('acl.get', {})
.then(function (result) {
console.log('success:', result)
})
.catch(function (error) {
console.log('failure:', error)
})
})
.catch(function (error) {
console.log('failure', error)
})
})
.then(function () {
return xo.close()
})
}).then(function () {
return xo.close()
})

View File

@@ -1,7 +1,4 @@
import JsonRpcWebSocketClient, {
OPEN,
CLOSED,
} from 'jsonrpc-websocket-client'
import JsonRpcWebSocketClient, { OPEN, CLOSED } from 'jsonrpc-websocket-client'
import { BaseError } from 'make-error'
import { startsWith } from 'lodash'
@@ -20,7 +17,7 @@ export default class Xo extends JsonRpcWebSocketClient {
const url = opts != null ? opts.url : '.'
super(`${url === '/' ? '' : url}/api/`)
this._credentials = (opts != null ? opts.credentials : null)
this._credentials = opts != null ? opts.credentials : null
this._user = null
this.on(OPEN, () => {
@@ -45,12 +42,13 @@ export default class Xo extends JsonRpcWebSocketClient {
}
const promise = super.call(method, args)
promise.retry = (predicate) => promise.catch((error) => {
i = (i || 0) + 1
if (predicate(error, i)) {
return this.call(method, args, i)
}
})
promise.retry = predicate =>
promise.catch(error => {
i = (i || 0) + 1
if (predicate(error, i)) {
return this.call(method, args, i)
}
})
return promise
}

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -27,10 +27,10 @@
"lodash": "^4.13.1"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"deep-freeze": "^0.0.1",
"rimraf": "^2.6.1"
@@ -40,23 +40,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"babel": {
"plugins": [
"lodash"
],
"presets": [
[
"env",
{
"targets": {
"browsers": "> 5%",
"node": 4
}
}
],
"stage-3"
]
"prepare": "yarn run build"
}
}

View File

@@ -3,10 +3,13 @@ import map from 'lodash/map'
import trim from 'lodash/trim'
import trimStart from 'lodash/trimStart'
const sanitizePath = (...paths) => filter(map(paths, s => s && filter(map(s.split('/'), trim)).join('/'))).join('/')
const sanitizePath = (...paths) =>
filter(map(paths, s => s && filter(map(s.split('/'), trim)).join('/'))).join(
'/'
)
export const parse = string => {
const object = { }
const object = {}
const [type, rest] = string.split('://')
if (type === 'file') {
@@ -36,7 +39,7 @@ export const parse = string => {
return object
}
export const format = ({type, host, path, username, password, domain}) => {
export const format = ({ type, host, path, username, password, domain }) => {
type === 'local' && (type = 'file')
let string = `${type}://`
if (type === 'nfs') {

View File

@@ -1,4 +1,4 @@
import {Strategy} from 'passport-github'
import { Strategy } from 'passport-github'
// ===================================================================
@@ -27,18 +27,23 @@ class AuthGitHubXoPlugin {
}
load () {
const {_xo: xo} = this
const { _xo: xo } = this
xo.registerPassportStrategy(new Strategy(this._conf, async (accessToken, refreshToken, profile, done) => {
try {
done(null, await xo.registerUser('github', profile.username))
} catch (error) {
done(error.message)
}
}))
xo.registerPassportStrategy(
new Strategy(
this._conf,
async (accessToken, refreshToken, profile, done) => {
try {
done(null, await xo.registerUser('github', profile.username))
} catch (error) {
done(error.message)
}
}
)
)
}
}
// ===================================================================
export default ({xo}) => new AuthGitHubXoPlugin(xo)
export default ({ xo }) => new AuthGitHubXoPlugin(xo)

Some files were not shown because too many files have changed in this diff Show More