Compare commits

...

128 Commits

Author SHA1 Message Date
Julien Fontanet
385c3eb563 feat(xo-vmdk-to-vhd): 0.1.1 2018-05-18 18:40:26 +02:00
Julien Fontanet
e56be51b45 chore(xo-server/backups-ng): remove incorrect TODO 2018-05-18 17:14:50 +02:00
Olivier Lambert
24ae65b254 fix(xo-server/sr.createNfs): nfsVersion → nfsOptions (#2904) 2018-05-18 16:28:02 +02:00
badrAZ
d5dffbacbd fix(xo-web/FormattedDuration): handle duration < 0 seconds (#2964) 2018-05-18 15:06:23 +02:00
Julien Fontanet
c6ae969a82 fix(xo-server/https): ask for passphrase (#2963)
Fixes #2962
2018-05-18 15:05:49 +02:00
Nicolas Raynaud
005a9fdc01 fix(xo-vmdk-to-vhd): various bugs (#2961) 2018-05-18 14:02:19 +02:00
Jerome Charaoui
f505d4d911 Fix SR creation when using options or NFSv4 (#2960) 2018-05-17 22:12:09 +02:00
badrAZ
8ada6b121e fix(backup-ng/logs): handle the case when transfer duration equals 0 (#2954) 2018-05-17 16:58:29 +02:00
Julien Fontanet
b9a87efb0d fix(xo-server/backupNg): don't fail on corrupted VHDs (#2957)
Corrupted VHD files (usually uncleaned temporary) could fail the job.
2018-05-17 11:27:02 +02:00
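A minimal sketch of the approach described above, using hypothetical helper names rather than the actual xo-server code: reading each VHD's header is wrapped in a try/catch so that a corrupted (usually uncleaned temporary) file is logged and skipped instead of aborting the whole job.

// Sketch only: `handler.list` and the surrounding job code are assumed stand-ins;
// Vhd and readHeaderAndFooter come from this repository's vhd-lib package.
import Vhd from 'vhd-lib'

async function listUsableVhds (handler, dir) {
  const usable = []
  for (const name of await handler.list(dir)) {
    if (!name.endsWith('.vhd')) continue
    try {
      const vhd = new Vhd(handler, `${dir}/${name}`)
      await vhd.readHeaderAndFooter() // throws on a corrupted or truncated file
      usable.push(name)
    } catch (error) {
      console.warn('ignoring corrupted VHD', name, error)
    }
  }
  return usable
}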
Pierre Donias
89485a82d2 feat(xo-web): make many objects' UUID copiable (#2955)
Fixes #2925

- host/tab-network
- pool/tab-network
- vm/tab-disks
- vm/tab-network
- vm/tab-snapshots
2018-05-16 17:39:47 +02:00
Pierre Donias
451f87c6b4 feat(xo-web/servers): allow unauthorized cert. when adding server (#2953)
Fixes #2926
2018-05-16 13:44:27 +02:00
Rajaa.BARHTAOUI
c3cb5a3221 feat(xo-server,xo-web): VM HA options (#2946)
Fixes #2917
2018-05-16 13:27:40 +02:00
Julien Fontanet
458609ed2e feat(xo-server): 5.19.6 2018-05-16 10:32:59 +02:00
Julien Fontanet
fcec8113f3 fix(xo-server/backupNg): await writeStream (#2951) 2018-05-16 10:32:38 +02:00
Julien Fontanet
ebbd882ee4 feat(xo-web): 5.19.4 2018-05-15 17:44:25 +02:00
Julien Fontanet
0506e19a66 chore(xo-server/backups-ng): update todo list 2018-05-15 17:44:09 +02:00
Pierre Donias
ecc62e4f54 fix(xo-web/xosan): install packs button condition (#2950) 2018-05-15 17:40:40 +02:00
Julien Fontanet
2b95eb4e4d feat(xo-web): 5.19.3 2018-05-15 16:11:53 +02:00
Julien Fontanet
bcde9e0f74 feat(xo-server): 5.19.5 2018-05-15 16:11:34 +02:00
Pierre Donias
114501ebc7 feat(XOSAN): allow user to update packs (#2782) 2018-05-15 16:11:04 +02:00
badrAZ
ebab7c0867 fix(backup-ng/logs): handle the case when transfer/merge duration equals 0 (#2949) 2018-05-15 16:10:17 +02:00
Julien Fontanet
0e2270fb6e feat(xo-web): 5.19.2 2018-05-15 14:46:33 +02:00
Julien Fontanet
593493ec0c feat(xo-server): 5.19.4 2018-05-15 14:46:07 +02:00
Julien Fontanet
d92898a806 feat(xo-vmdk-to-vhd): 0.1.0 2018-05-15 14:45:19 +02:00
Julien Fontanet
7890e46551 feat(xo-server-backup-reports): 0.11.0 2018-05-15 14:42:32 +02:00
Julien Fontanet
ef942a6209 feat(Backup NG): implement logs and reports (#2869) 2018-05-15 14:40:11 +02:00
Nicolas Raynaud
fdde916388 feat(xo-web/vms-import): redirect to VM or home page (#2942)
If a single VM has been imported, redirect to its page.

If multiple VMs have been imported, redirect to the homepage with all other VMs filtered out.
2018-05-14 17:42:11 +02:00
Julien Fontanet
31314d201b fix(xo-server/backupNg/delta): await deletion/merge 2018-05-14 15:38:11 +02:00
Julien Fontanet
a29a949c51 fix(xo-server/backupNg/delta): deleteFirst iff retention > 1 2018-05-14 15:37:09 +02:00
Julien Fontanet
cc1ce8c5f8 chore: update yarn.lock 2018-05-14 13:53:03 +02:00
Nicolas Raynaud
a21bf4ebe5 chore: major VHD code restructuring (#2808)
Related to #2792
2018-05-14 13:48:16 +02:00
Julien Fontanet
3d0420dbd9 fix(xo-server/backupNg): clean metadata on VM itself (#2945) 2018-05-14 11:47:34 +02:00
Julien Fontanet
04c74dd30f fix: missing bit of last commit 2018-05-11 20:17:02 +02:00
Julien Fontanet
2f256291ae fix(xo-server/backup legacy/delta import): autofix path (#2941)
Because the path might be incorrect and be `_full.vhd` instead of `_delta.vhd`.

I know…
2018-05-11 20:16:00 +02:00
Julien Fontanet
bcb66a4145 fix(xo-server/backup NG/listReplicatedVms): avoid templates and snapshots 2018-05-11 18:45:47 +02:00
Fabrice Marsaud
2d9368062e fix(xo-web/xoa-updater): don't block paying plans when updater is on error (#2939) 2018-05-11 17:18:24 +02:00
Pierre Donias
b110bacf61 fix(xo-server/patches): bulk install (#2935) 2018-05-09 17:32:11 +02:00
Julien Fontanet
78afdc0af5 feat(xo-web): 5.19.1 2018-05-07 13:19:59 +02:00
Julien Fontanet
ad6cd7985a feat(xo-server): 5.19.3 2018-05-07 13:07:21 +02:00
Julien Fontanet
a61661776d fix(xo-server/listVmBackupsNg): handle missing vhds field
This field is only present on delta backups
2018-05-07 11:02:14 +02:00
Julien Fontanet
1a9ebddcab fix(xo-server/listBackupNgPartitionFiles): missing await
Strange thing though: it works in dev compilation mode…

Fixes #2929
2018-05-07 10:33:44 +02:00
Julien Fontanet
7ab907a854 feat(xo-server/backup NG): file restore (#2889) 2018-05-06 18:38:47 +02:00
Olivier Lambert
68a34f7cdb feat(changelog): update changelog 2018-05-05 12:05:56 +02:00
Rajaa.BARHTAOUI
da4ff3082d feat(xo-web/health): list VM snapshots related to non-existing backup jobs (#2899)
Fixes #2828
2018-05-04 15:59:11 +02:00
Rajaa.BARHTAOUI
9c05a59b5f feat(xo-web/SR/advanced): add VDI UUID in coalesce table (#2919)
Fixes #2903
2018-05-04 12:28:15 +02:00
Rajaa.BARHTAOUI
6780146505 feat(xo-web/patches): better "restart host" warnings (#2909)
Fixes #2866
2018-05-04 10:41:02 +02:00
Julien Fontanet
2758833fc6 feat(xo-server): 5.19.2 2018-05-03 19:12:48 +02:00
Julien Fontanet
2786d7ec46 fix(vhd/createSyntheticReadStream): sectorsPerBlock → sectorsPerBlockData 2018-05-03 19:11:01 +02:00
Julien Fontanet
945a2006c9 feat(xo-server/legacy backup/import): use synthetic stream (#2913) 2018-05-02 17:48:13 +02:00
badrAZ
b9e574e32f fix(SR/tab-stats): fix IOPS's and IOwait's values format (#2914) 2018-05-02 14:17:13 +02:00
Julien Fontanet
34f1ef1680 feat(xo-server): 5.19.1 2018-05-01 17:26:15 +02:00
Julien Fontanet
4ac4310bc1 fix(xo-server/importDeltaVm): remove extra return
It prevented the VBD creation in case of a new VDI.
2018-05-01 17:25:46 +02:00
Julien Fontanet
a10997ca66 feat(xo-web): 5.19.0 2018-05-01 16:13:20 +02:00
Julien Fontanet
0e52a4c7dc feat(xo-server): 5.19.0 2018-05-01 16:12:44 +02:00
Julien Fontanet
a4b3e22c2b feat(xo-server-perf-alert): 0.1.0 2018-05-01 16:10:10 +02:00
Julien Fontanet
441bd7c754 feat(xo-server-auth-saml): 0.5.2 2018-05-01 16:09:06 +02:00
badrAZ
ddbd32d1cb chore(xo-web/backup-ng/new): single effect to toggle modes (#2908) 2018-04-30 11:19:45 +02:00
Pierre Donias
a5b0cbeaea feat(xo-server-perf-alert): SR disk space (#2905) 2018-04-27 17:38:19 +02:00
Rajaa.BARHTAOUI
c6f3b2b1ce feat(xo-web/XOA update): display "Downgrade" when trial is over (#2845)
Fixes #1483
2018-04-27 10:05:27 +02:00
Pierre Donias
3d869d9fa1 chore(xo-web/health): remove irrelevant actions on VDIs (#2882) 2018-04-26 17:37:28 +02:00
Julien Fontanet
7a5229741f chore: disable tests on Node 9 due to upath error 2018-04-26 17:21:00 +02:00
Rajaa.BARHTAOUI
78e0c2d8fa feat(xo-web/SortedTable): support link actions (#2893) 2018-04-26 15:46:10 +02:00
Pierre Donias
5928984069 feat(xo-web/home): sort by container name (#2901)
Fixes #2680
2018-04-26 15:21:48 +02:00
Julien Fontanet
61a472f108 chore(xo-server/vhd/createReadStream): improve genericity (#2865)
It should now be pretty easy to make it generate a delta VHD, which should be very useful when merging multiple deltas together (during deletion).
2018-04-24 18:06:58 +02:00
Julien Fontanet
e45f78ea20 fix(xo-web/backup-ng): delete backups sequentially (#2855)
- sequentially: to limit merge issues
- from newest to oldest: to avoid unnecessary merges
2018-04-23 16:35:34 +02:00
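A minimal sketch of what "sequentially, from newest to oldest" means here, with a hypothetical deleteBackup helper (not the actual xo-web code): the backups are sorted by timestamp descending and the deletions are awaited one at a time instead of being fired in parallel.

// Sketch only: `backups` is assumed to be an array of { id, timestamp } and
// deleteBackup a hypothetical function deleting a single backup on the server.
async function deleteBackupsSequentially (backups, deleteBackup) {
  const sorted = [...backups].sort((a, b) => b.timestamp - a.timestamp) // newest first
  for (const { id } of sorted) {
    await deleteBackup(id) // one at a time, to limit merge issues
  }
}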
Olivier Lambert
b3ae9d88eb feat(vm): expose vendor device features in advanced tab. Fixes #2883 (#2894) 2018-04-23 15:02:40 +02:00
Pierre Donias
f7f26537be fix(xo-web/vm/network): bad import after file name change (#2892)
Fixes #2891

Introduced by 769c32a1b1
2018-04-23 14:08:56 +02:00
Julien Fontanet
96848fc6d4 fix(xo-server/importDeltaVm): create VBDs earlier (#2885)
To avoid orphan VDIs AMAP.
2018-04-20 17:33:36 +02:00
Julien Fontanet
51e6f0c79f feat(xo-server-usage-report): 0.4.2 2018-04-19 09:39:47 +02:00
badrAZ
4f94ad40b7 fix(xo-server-usage-report): handle missing metrics (#2880) 2018-04-18 16:30:30 +02:00
Pierre Donias
43e1eb9939 fix(xo-web/Ellipsis): handle patchedRender wrapping (#2881)
Also use Ellipsis on resource set name in home/VM view
2018-04-18 11:00:09 +02:00
Julien Fontanet
1f6d7de861 feat(xo-server-usage-report): 0.4.1 2018-04-17 14:03:32 +02:00
Julien Fontanet
bd623c2daf feat(xo-web): 5.18.3 2018-04-17 11:36:00 +02:00
Pierre Donias
40c71c2102 fix(xo-web/SelectSr): "pool" typo (#2878)
Fixes #2875
2018-04-17 11:05:56 +02:00
Nicolas Raynaud
72a1580eff fix(xo-server/vhd-merge.integ.spec): increase timeout (#2874) 2018-04-17 11:04:30 +02:00
Julien Fontanet
9e2404a0d7 feat(xo-web): 5.18.2 2018-04-16 17:32:24 +02:00
Julien Fontanet
7dd84d1518 feat(xo-server): 5.18.3 2018-04-16 17:29:02 +02:00
Julien Fontanet
d800db5d09 fix(xo-web/backup-ng/new): fix empty srs & remotes
Send `undefined` instead of `false`.
2018-04-16 17:26:27 +02:00
Julien Fontanet
2714ccff38 fix(xo-server/backupNg.{create,edit}Job): check srs param 2018-04-16 17:15:35 +02:00
Julien Fontanet
1d493e411b fix(xo-server/backups-ng): correctly detect delta exports
Fixes #2833
2018-04-16 16:54:55 +02:00
Julien Fontanet
2a0c222f2d chore(xo-server): use xen-api 0.16.9 2018-04-16 16:30:59 +02:00
Julien Fontanet
641d68de0e feat(xen-api): 0.16.9 2018-04-16 16:29:41 +02:00
Julien Fontanet
2dd0fd660b chore(xo-server/backups-ng): update todo list 2018-04-16 16:28:09 +02:00
badrAZ
bb5441c7bc feat(xo-web/SelectSr): add container name to SRs that have the same names (#2824)
Fixes #1762
2018-04-16 16:16:55 +02:00
badrAZ
eeea9e662b fix(xo-web/backup-ng/new): rename edit button and change cancel icon (#2858)
See #2711
2018-04-16 15:49:54 +02:00
badrAZ
8d4874e240 fix(xo-web/backupNg/new): make the default retention equal to 1 (#2872) 2018-04-16 15:27:55 +02:00
See #2711
2018-04-16 15:27:55 +02:00
badrAZ
a8ba4a1a8e feat(xo-web): stats for SRs (#2847) 2018-04-16 14:40:00 +02:00
Julien Fontanet
0c027247ec fix(normalize-packages): homepage for scoped packages 2018-04-15 23:41:27 +02:00
badrAZ
164cb39c1b fix(xo-web/backup/new): schedules values can be null (#2773) 2018-04-13 17:10:03 +02:00
Julien Fontanet
52503de645 fix(xo-web/initial fetch): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Julien Fontanet
83b8b5de61 fix(xo-web/updater): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Rajaa.BARHTAOUI
3e326c4e62 feat(xo-web/updater): disable upgrade button when not needed (#2816)
Fixes #1594
2018-04-13 16:46:58 +02:00
Julien Fontanet
a6b0690416 fix(xo-server): unmanaged VDI snapshots are VDI-unmanaged 2018-04-13 11:36:10 +02:00
Julien Fontanet
dcd007c5c7 fix(xen-api): fix sync test in watchTask (#2868) 2018-04-12 18:02:51 +02:00
Julien Fontanet
eb090e4874 fix(xen-api): getObject* should not return null 2018-04-12 11:06:08 +02:00
Julien Fontanet
4b716584f7 feat(xo-server): 5.18.2 2018-04-11 17:48:59 +02:00
Julien Fontanet
4bc348f39f fix(xo-server/vhd/createReadStream): emit empty if missing sectors 2018-04-11 17:47:43 +02:00
Julien Fontanet
9c75992fe4 feat(xo-web): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
4bb2702ac5 feat(xo-server): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
ea8133cb41 fix(xo-server/vhd/createReadStream): handle unallocated blocks (#2859)
Fixes #2857
2018-04-11 17:24:46 +02:00
Pierre Donias
fc40c7b03d fix(xo-web/new SR): create button not showing (#2854)
Fixes #2853
2018-04-11 10:21:06 +02:00
Julien Fontanet
7fe5b66fdb feat(xo-server-auth-saml): log profile when no name found 2018-04-10 19:09:30 +02:00
Julien Fontanet
0f1d052493 chore: update dependencies 2018-04-09 18:11:53 +02:00
badrAZ
56a182f795 fix(xo-web/backup-ng/new): don't add a target more than once (#2849)
Fixes #2848
2018-04-09 17:22:38 +02:00
Julien Fontanet
e8da1b943b fix(xo-server/backups-ng): create all forks at the same time (#2842)
Fixes #2790
2018-04-09 16:42:05 +02:00
Julien Fontanet
3913b0eba1 feat(xen-api): 0.16.8 2018-04-09 13:58:52 +02:00
Julien Fontanet
7990e45095 fix(xen-api): allow UUIDs for ro calls 2018-04-09 13:56:47 +02:00
Julien Fontanet
a7068ec166 fix(xo-server/importDeltaVm): better network matching (#2834)
Fixes #2093
2018-04-07 01:00:19 +02:00
Pierre Donias
55b35ac0cf NFS version and options (#2841)
Add NFS version & NFS options. Fixes #2706
2018-04-06 17:46:18 +02:00
Julien Fontanet
a251f8ca75 fix(xo-server/backups-ng): don't remove startable VMs (#2840)
Fixes #2724
2018-04-06 17:12:36 +02:00
Rajaa.BARHTAOUI
172ce2c7a1 feat(xo-web/jobs/new): use SortedTable (#2670)
See #2416
2018-04-06 16:45:46 +02:00
Olivier Lambert
3cef668a75 feat(xo-web,xo-server): create HBA SR (#2836)
Fixes #1992
2018-04-06 16:01:48 +02:00
Olivier Lambert
e6deb29070 fix(SR): incorrect case in deviceConfig for iSCSI probe (#2839) 2018-04-06 15:03:04 +02:00
Olivier Lambert
51609d45a2 feat(xo-web,xo-server): expose VM Xen Tools version (#2838)
Fixes #2650
2018-04-06 14:26:44 +02:00
Rajaa.BARHTAOUI
5cb6dc6d92 feat(xo-web): create new disk from SR view (#2726)
Fixes #2229
2018-04-06 13:54:32 +02:00
Nicolas Raynaud
c5174a61b7 chore(xo-server/debounce): reduce test flakiness (#2831) 2018-04-06 10:14:25 +02:00
badrAZ
93e987982c fix(xo-web/logs): display the correct call states when the job is interrupted (#2734)
Fixes #2732
2018-04-05 16:46:43 +02:00
Julien Fontanet
fc421428fd fix: missing ESLint config changes 2018-04-05 16:15:26 +02:00
Julien Fontanet
7400bd657a chore: coding style fixes 2018-04-05 15:53:57 +02:00
Julien Fontanet
da62cba3f8 chore: update dependencies 2018-04-05 11:00:03 +02:00
Patrick Tully
461cc7e547 fix(xo-web/icons.scss): remove extra commas (#2817) 2018-04-05 10:57:18 +02:00
badrAZ
b898ed4785 feat(xo-server/xapi-stats): new implementation (#2648) 2018-04-04 14:20:30 +02:00
Julien Fontanet
149530e73f feat(cron): 1.0.3 2018-04-03 17:21:21 +02:00
Julien Fontanet
7e627c953e fix(cron): selecting the first sunday of the month 2018-04-03 17:21:21 +02:00
Pierre Donias
bc86984f19 chore(xo-server/createNetwork): set other_config.automatic to false (#2825)
Fixes #2818

If a network has its other_config.automatic value set to any value other than
false, then XenCenter's New VM wizard will create a VIF connected to this network.
See https://citrix.github.io/xenserver-sdk/#network
2018-04-03 15:32:39 +02:00
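A minimal sketch of the behaviour this commit relies on, assuming `xapi` is an already-connected instance of this repository's xen-api client and `networkRef` is the OpaqueRef of a freshly created network; network.add_to_other_config is a standard XenAPI RPC.

// Sketch only: marking the network as non-automatic keeps XenCenter's
// "New VM" wizard from attaching a VIF to it by default.
await xapi.call('network.add_to_other_config', networkRef, 'automatic', 'false')

// To revert and let XenCenter attach VIFs automatically again:
// await xapi.call('network.remove_from_other_config', networkRef, 'automatic')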
Julien Fontanet
e40f3acdd4 feat(xo-web): 5.18.0 2018-03-30 18:04:11 +02:00
Julien Fontanet
63d93224e0 feat(xo-server): 5.18.0 2018-03-30 18:03:53 +02:00
badrAZ
c87356c319 feat: ability to delete a default template (#2812)
Fixes #2666
2018-03-30 18:03:12 +02:00
177 changed files with 9199 additions and 6022 deletions

View File

@@ -12,9 +12,11 @@ module.exports = {
parser: 'babel-eslint',
rules: {
'comma-dangle': ['error', 'always-multiline'],
indent: 'off',
'no-var': 'error',
'node/no-extraneous-import': 'error',
'node/no-extraneous-require': 'error',
'prefer-const': 'error',
'react/jsx-indent': 'off',
},
}

.gitignore vendored
View File

@@ -8,6 +8,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/plot.dat
/packages/xo-server/.xo-server.*

View File

@@ -1,6 +1,6 @@
language: node_js
node_js:
- stable
#- stable # disable for now due to an issue of indirect dep upath with Node 9
- 8
- 6
@@ -12,6 +12,7 @@ addons:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter
before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash

View File

@@ -2,7 +2,7 @@
"private": true,
"name": "@xen-orchestra/babel-config",
"version": "0.0.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/babel-config",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.2",
"version": "1.0.3",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -14,7 +14,7 @@
"scheduling",
"task"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
@@ -41,10 +41,10 @@
"moment-timezone": "^0.5.14"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/preset-flow": "7.0.0-beta.42",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

View File

@@ -41,4 +41,8 @@ describe('next()', () => {
'no solutions found for this schedule'
)
})
it('select the first sunday of the month', () => {
expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
})
})

View File

@@ -176,10 +176,10 @@ export default createParser({
range: [0, 11],
},
{
aliases: 'mon tue wen thu fri sat sun'.split(' '),
aliases: 'sun mon tue wen thu fri sat'.split(' '),
name: 'dayOfWeek',
post: value => (value === 0 ? 7 : value),
range: [1, 7],
post: value => (value === 7 ? 0 : value),
range: [0, 6],
},
],
presets: {

View File

@@ -23,7 +23,7 @@ describe('parse()', () => {
it('correctly parse days', () => {
expect(parse('* * * * mon,sun')).toEqual({
dayOfWeek: [1, 7],
dayOfWeek: [0, 1],
})
})
@@ -40,10 +40,10 @@ describe('parse()', () => {
it('dayOfWeek: 0 and 7 bind to sunday', () => {
expect(parse('* * * * 0')).toEqual({
dayOfWeek: [7],
dayOfWeek: [0],
})
expect(parse('* * * * 7')).toEqual({
dayOfWeek: [7],
dayOfWeek: [0],
})
})
})

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -0,0 +1,54 @@
{
"name": "@xen-orchestra/fs",
"version": "0.0.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.44",
"@marsaud/smb2-promise": "^0.2.1",
"execa": "^0.10.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.9.5",
"through2": "^2.0.3",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}

View File

@@ -1,11 +1,11 @@
// @flow
import getStream from 'get-stream'
import { randomBytes } from 'crypto'
import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
import { type Readable, type Writable } from 'stream'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
import { getPseudoRandomBytes, streamToBuffer } from '../utils'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
type Data = Buffer | Readable | string
@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
async test (): Promise<Object> {
const testFileName = `${Date.now()}.test`
const data = getPseudoRandomBytes(1024 * 1024)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
let step = 'write'
try {
await this.outputFile(testFileName, data)
@@ -97,7 +97,7 @@ export default class RemoteHandlerAbstract {
}
_readFile (file: string, options?: Object): Promise<Buffer> {
return this.createReadStream(file, options).then(streamToBuffer)
return this.createReadStream(file, options).then(getStream.buffer)
}
async rename (

View File

@@ -0,0 +1,26 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import { getHandler } from '.'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test("fs test doesn't crash", async () => {
const handler = getHandler({ url: 'file://' + process.cwd() })
const result = await handler.test()
expect(result.success).toBeTruthy()
})

View File

@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'
export type { default as RemoteHandler } from './abstract'
export type Remote = { url: string }
const HANDLERS = {

View File

@@ -1,7 +1,9 @@
import Smb2 from '@marsaud/smb2-promise'
import { lastly as pFinally } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
import { noop, pFinally } from '../utils'
const noop = () => {}
// Normalize the error code for file not found.
const normalizeError = error => {

View File

@@ -1,9 +1,104 @@
# ChangeLog
## **5.20.0** (planned 2018-05-31)
### Enhancements
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
### Bugs
## **5.19.0** (2018-05-01)
### Enhancements
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
### Bugs
- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
## **5.18.0** (2018-03-31)
### Enhancements
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
### Bugs
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
- [Basic backup] Continous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
## **5.17.0** (2018-03-02)
### Enhancements
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
@@ -22,6 +117,9 @@
- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
### Bugs
@@ -41,6 +139,7 @@
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
@@ -50,6 +149,8 @@
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
### Bugs
@@ -79,6 +180,7 @@
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
### Bugs

View File

@@ -1,4 +1,12 @@
declare module 'lodash' {
declare export function forEach<K, V>(
object: { [K]: V },
iteratee: (V, K) => void
): void
declare export function groupBy<K, V>(
object: { [K]: V },
iteratee: K | ((V, K) => string)
): { [string]: V[] }
declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
declare export function isEmpty(mixed): boolean
declare export function keyBy<T>(array: T[], iteratee: string): boolean

View File

@@ -3,8 +3,11 @@ declare module 'promise-toolbox' {
declare export function defer<T>(): {|
promise: Promise<T>,
reject: T => void,
resolve: T => void
resolve: T => void,
|}
declare export function fromCallback<T>(
(cb: (error: any, value: T) => void) => void
): Promise<T>
declare export function fromEvent(emitter: mixed, string): Promise<mixed>
declare export function ignoreErrors(): Promise<void>
declare export function timeout<T>(delay: number): Promise<T>

View File

@@ -1,6 +1,6 @@
{
"devDependencies": {
"@babel/register": "^7.0.0-beta.40",
"@babel/register": "^7.0.0-beta.44",
"babel-7-jest": "^21.3.2",
"babel-eslint": "^8.1.2",
"benchmark": "^2.1.4",
@@ -13,7 +13,7 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^3.0.1",
"exec-promise": "^0.7.0",
"flow-bin": "^0.68.0",
"flow-bin": "^0.69.0",
"globby": "^8.0.0",
"husky": "^0.14.3",
"jest": "^22.0.4",
@@ -34,16 +34,18 @@
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/dist/",
"/xo-vmdk-to-vhd/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"transform": {
"/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
"/@xen-orchestra/fs/.+\\.jsx?$": "babel-7-jest",
"/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/vhd-lib/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-vmdk-to-vhd/.+\\.jsx?$": "babel-7-jest",
"\\.jsx?$": "babel-jest"
}
},

View File

@@ -30,9 +30,9 @@
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.1",
"rimraf": "^2.6.2"

View File

@@ -28,10 +28,10 @@
},
"dependencies": {},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/preset-flow": "7.0.0-beta.42",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-cli",
"version": "0.0.0",
"version": "0.0.1",
"license": "ISC",
"description": "",
"keywords": [],
@@ -26,10 +26,11 @@
"node": ">=4"
},
"dependencies": {
"struct-fu": "^1.2.0",
"@nraynaud/xo-fs": "^0.0.5",
"@xen-orchestra/fs": "^0.0.0",
"babel-runtime": "^6.22.0",
"exec-promise": "^0.7.0"
"exec-promise": "^0.7.0",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.0.0"
},
"devDependencies": {
"babel-cli": "^6.24.1",
@@ -38,14 +39,18 @@
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
"execa": "^0.10.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.9.5",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
"prepare": "yarn run build"
},
"babel": {
"plugins": [

View File

@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const handler = getHandler({ url: 'file:///' })
for (const vhd of args) {
try {
await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
console.log('ok:', vhd)
} catch (error) {
console.error('nok:', vhd, error)
}
}
}

View File

@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))
await vhd.readHeaderAndFooter()
console.log(vhd.header)
console.log(vhd.footer)
}

View File

@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
export default async function main (args) {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <input VHD> <output VHD>`
}
const handler = getHandler({ url: 'file:///' })
return new Promise((resolve, reject) => {
createSyntheticStream(handler, path.resolve(args[0]))
.on('error', reject)
.pipe(
createWriteStream(args[1])
.on('error', reject)
.on('finish', resolve)
)
})
}

View File

@@ -1,19 +1,44 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'
import Vhd from './vhd'
import commands from './commands'
execPromise(async args => {
const vhd = new Vhd(
new RemoteHandlerLocal({ url: 'file:///' }),
resolve(args[0])
function runCommand (commands, [command, ...args]) {
if (command === undefined || command === '-h' || command === '--help') {
command = 'help'
}
const fn = commands[command]
if (fn === undefined) {
if (command === 'help') {
return `Usage:
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${this.command} ${command}`)
.join('\n\n')}`
}
throw `invalid command ${command}` // eslint-disable-line no-throw-literal
}
return fn.call(
{
__proto__: this,
command: `${this.command} ${command}`,
},
args
)
}
await vhd.readHeaderAndFooter()
console.log(vhd._header)
console.log(vhd._footer)
})
execPromise(
runCommand.bind(
{
command: 'vhd-cli',
runCommand,
},
commands
)
)

View File

@@ -0,0 +1,28 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import command from './commands/info'
const initialDir = process.cwd()
jest.setTimeout(10000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('can run the command', async () => {
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
await command(['empty.vhd'])
})

View File

@@ -1,461 +0,0 @@
import assert from 'assert'
import fu from 'struct-fu'
import { dirname } from 'path'
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
/* eslint-disable no-unused-vars */
const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512
/* eslint-enable no-unused vars */
// ===================================================================
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
fu.struct('dataOffset', [
fu.uint32('high'), // 16
fu.uint32('low'), // 20
]),
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
fu.struct('originalSize', [
// At the creation, current size of the hard disk.
fu.uint32('high'), // 40
fu.uint32('low'), // 44
]),
fu.struct('currentSize', [
// Current size of the virtual disk. At the creation: currentSize = originalSize.
fu.uint32('high'), // 48
fu.uint32('low'), // 52
]),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
fu.struct('tableOffset', [
// Absolute byte offset of the Block Allocation Table.
fu.uint32('high'),
fu.uint32('low'),
]),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.byte('reserved1', 4),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
fu.struct('platformDataOffset', [
// Absolute byte offset of the locator data.
fu.uint32('high'),
fu.uint32('low'),
]),
],
8
),
fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size
// ===================================================================
// Helpers
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
map[bit >> 3] &= ~(1 << (bit & 7))
}
const addOffsets = (...offsets) =>
offsets.reduce(
(a, b) =>
b == null
? a
: typeof b === 'object'
? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
: { bytes: a.bytes + b, bits: a.bits },
{ bytes: 0, bits: 0 }
)
const pack = (field, value, buf, offset) => {
field.pack(value, buf, addOffsets(field.offset, offset))
}
const unpack = (field, buf, offset) =>
field.unpack(buf, addOffsets(field.offset, offset))
// ===================================================================
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
const streamToExistingBuffer = (
stream,
buffer,
offset = 0,
end = buffer.length
) =>
new Promise((resolve, reject) => {
assert(offset >= 0)
assert(end > offset)
assert(end <= buffer.length)
let i = offset
const onData = chunk => {
const prev = i
i += chunk.length
if (i > end) {
return onError(new Error('too much data'))
}
chunk.copy(buffer, prev)
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(i - offset)
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
// ===================================================================
// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumField = struct.fields.checksum
const checksumOffset = offset + checksumField.offset
for (let i = offset, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = offset + struct.size;
i < n;
++i
) {
sum += buf[i]
}
return ~sum >>> 0
}
const verifyChecksum = (struct, buf, offset) =>
unpack(struct.fields.checksum, buf, offset) ===
computeChecksum(struct, buf, offset)
const getParentLocatorSize = parentLocatorEntry => {
const { platformDataSpace } = parentLocatorEntry
if (platformDataSpace < SECTOR_SIZE) {
return platformDataSpace * SECTOR_SIZE
}
return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}
// ===================================================================
// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]
export default class Vhd {
constructor (handler, path) {
this._handler = handler
this._path = path
this._blockAllocationTable = null
this._blockBitmapSize = null
this._footer = null
this._header = null
this._parent = null
this._sectorsPerBlock = null
}
// Read `length` bytes starting from `begin`.
//
// - if `buffer`: it is filled starting from `offset`, and the
// number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_read (begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
return this._handler
.createReadStream(this._path, {
end: begin + length - 1,
start: begin,
})
.then(
buf
? stream =>
streamToExistingBuffer(
stream,
buf,
offset,
(offset || 0) + length
)
: streamToNewBuffer
)
}
// - if `buffer`: it is filled with 0 starting from `offset`, and
// the number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_zeroes (length, buf, offset = 0) {
if (buf) {
assert(offset >= 0)
assert(length > 0)
const end = offset + length
assert(end <= buf.length)
buf.fill(0, offset, end)
return Promise.resolve(length)
}
return Promise.resolve(Buffer.alloc(length))
}
// Return the position of a block in the VHD or undefined if not found.
_getBlockAddress (block) {
assert(block >= 0)
assert(block < this._header.maxTableEntries)
const blockAddr = this._blockAllocationTable[block]
if (blockAddr !== 0xffffffff) {
return blockAddr * SECTOR_SIZE
}
}
// -----------------------------------------------------------------
async readHeaderAndFooter () {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
if (!verifyChecksum(fuFooter, buf)) {
throw new Error('footer checksum does not match')
}
if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
throw new Error('header checksum does not match')
}
return this._initMetadata(
unpack(fuHeader, buf, FOOTER_SIZE),
unpack(fuFooter, buf)
)
}
async _initMetadata (header, footer) {
const sectorsPerBlock = header.blockSize / SECTOR_SIZE
assert(sectorsPerBlock % 1 === 0)
// 1 bit per sector, rounded up to full sectors
this._blockBitmapSize =
Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
assert(this._blockBitmapSize === SECTOR_SIZE)
this._footer = footer
this._header = header
this.size = uint32ToUint64(this._footer.currentSize)
if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
const parent = new Vhd(
this._handler,
`${dirname(this._path)}/${header.parentUnicodeName}`
)
await parent.readHeaderAndFooter()
await parent.readBlockAllocationTable()
this._parent = parent
}
}
// -----------------------------------------------------------------
async readBlockAllocationTable () {
const { maxTableEntries, tableOffset } = this._header
const fuTable = fu.uint32(maxTableEntries)
this._blockAllocationTable = unpack(
fuTable,
await this._read(uint32ToUint64(tableOffset), fuTable.size)
)
}
// -----------------------------------------------------------------
// read a single sector in a block
async _readBlockSector (block, sector, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
assert(begin + length <= SECTOR_SIZE)
const blockAddr = this._getBlockAddress(block)
const blockBitmapSize = this._blockBitmapSize
const parent = this._parent
if (
blockAddr &&
(!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
) {
return this._read(
blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
length,
buf,
offset
)
}
return parent
? parent._readBlockSector(block, sector, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
_readBlock (block, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
const { blockSize } = this._header
assert(begin + length <= blockSize)
const blockAddr = this._getBlockAddress(block)
const parent = this._parent
if (!blockAddr) {
return parent
? parent._readBlock(block, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
if (!parent) {
return this._read(
blockAddr + this._blockBitmapSize + begin,
length,
buf,
offset
)
}
// FIXME: we should read as many sectors in a single pass as
// possible for maximum perf.
const [sector, beginInSector] = div(begin, SECTOR_SIZE)
return this._readBlockSector(
block,
sector,
beginInSector,
Math.min(length, SECTOR_SIZE - beginInSector),
buf,
offset
)
}
read (buf, begin, length = buf.length, offset) {
assert(Buffer.isBuffer(buf))
assert(begin >= 0)
const { size } = this
if (begin >= size) {
return Promise.resolve(0)
}
const { blockSize } = this._header
const [block, beginInBlock] = div(begin, blockSize)
return this._readBlock(
block,
beginInBlock,
Math.min(length, blockSize - beginInBlock, size - begin),
buf,
offset
)
}
}

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -0,0 +1,56 @@
{
"name": "vhd-lib",
"version": "0.0.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.44",
"@xen-orchestra/fs": "^0.0.0",
"async-iterator-to-stream": "^1.0.2",
"execa": "^0.10.0",
"from2": "^2.3.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.9.5",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1",
"tmp": "^0.0.33"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"fs-promise": "^2.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}

View File

@@ -0,0 +1,7 @@
const MASK = 0x80
export const set = (map, bit) => {
map[bit >> 3] |= MASK >> (bit & 7)
}
export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0

View File

@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'
export default function computeGeometryForSize (size) {
const totalSectors = Math.ceil(size / 512)
let sectorsPerTrackCylinder
let heads
let cylinderTimesHeads
if (totalSectors > 65535 * 16 * 255) {
throw Error('disk is too big')
}
// straight copypasta from the file spec appendix on CHS Calculation
if (totalSectors >= 65535 * 16 * 63) {
sectorsPerTrackCylinder = 255
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
} else {
sectorsPerTrackCylinder = 17
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
if (heads < 4) {
heads = 4
}
if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
sectorsPerTrackCylinder = 31
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
if (cylinderTimesHeads >= heads * 1024) {
sectorsPerTrackCylinder = 63
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
}
const cylinders = Math.ceil(cylinderTimesHeads / heads)
const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}

View File

@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff
// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '
// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'
export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4
export const PARENT_LOCATOR_ENTRIES = 8
export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16

View File

@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'
import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_FIXED,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PLATFORM_WI2K,
} from './_constants'
export function createFooter (
size,
timestamp,
geometry,
dataOffset,
diskType = DISK_TYPE_FIXED
) {
const footer = fuFooter.pack({
cookie: FOOTER_COOKIE,
features: 2,
fileFormatVersion: FILE_FORMAT_VERSION,
dataOffset,
timestamp,
creatorApplication: CREATOR_APPLICATION,
creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
originalSize: size,
currentSize: size,
diskGeometry: geometry,
diskType,
uuid: generateUuid(null, []),
})
checksumStruct(footer, fuFooter)
return footer
}
export function createHeader (
maxTableEntries,
tableOffset = HEADER_SIZE + FOOTER_SIZE,
blockSize = VHD_BLOCK_SIZE_BYTES
) {
const header = fuHeader.pack({
cookie: HEADER_COOKIE,
tableOffset,
headerVersion: HEADER_VERSION,
maxTableEntries,
blockSize,
})
checksumStruct(header, fuHeader)
return header
}

View File

@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
const uint64Undefinable = fu.derive(
fu.uint32(2),
number =>
number === undefined
? [0xffffffff, 0xffffffff]
: [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ =>
_[0] === 0xffffffff && _[1] === 0xffffffff
? undefined
: _[0] * SIZE_OF_32_BITS + _[1]
)
export const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64Undefinable('dataOffset'), // offset of the header
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
fu.char('reserved', 426), // 86
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
export const fuHeader = fu.struct([
fu.char('cookie', 8),
uint64Undefinable('dataOffset'),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)
export const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
export const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
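A hedged usage sketch: verifying a stored footer checksum with the helpers above (bufFooter is an assumed 512-byte Buffer holding a raw VHD footer):
const stored = unpackField(fuFooter.fields.checksum, bufFooter)
// checksumStruct mutates its buffer, so compute on a copy
const computed = checksumStruct(Buffer.from(bufFooter), fuFooter)
if (stored !== computed) {
  throw new Error(`corrupted footer: checksum ${stored} !== ${computed}`)
}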

View File

@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'
export default async function chain (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockAllocationTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
await childVhd.writeHeader()
await childVhd.writeFooter()
}
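A hedged usage sketch for chain() above (the handler URL and file names are illustrative): it rewrites the child's header so its parent locator points at parent.vhd.
import { getHandler } from '@xen-orchestra/fs'
import chain from './chain'

const handler = getHandler({ url: 'file:///tmp/vhds' })
// child.vhd must be (or, with force = true, will be marked as) a differencing disk
await chain(handler, 'parent.vhd', handler, 'child.vhd', true)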

View File

@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
export default asyncIteratorToStream(async function * (size, blockParser) {
const geometry = computeGeometryForSize(size)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry
)
let position = 0
function * filePadding (paddingLength) {
if (paddingLength > 0) {
const chunkSize = 1024 * 1024 // 1 MiB

for (
let paddingPosition = 0;
paddingPosition + chunkSize < paddingLength;
paddingPosition += chunkSize
) {
yield Buffer.alloc(chunkSize)
}
yield Buffer.alloc(paddingLength % chunkSize)
}
}
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.offsetBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield * filePadding(paddingLength)
yield next.data
position = next.offsetBytes + next.data.length
}
yield * filePadding(actualSize - position)
yield footer
})

View File

@@ -0,0 +1,126 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
 * Fills the BAT (Block Allocation Table) buffer in place: each VHD block that
 * appears in blockAddressList is assigned the sector at which it will be written.
*/
function createBAT (
firstBlockPosition,
blockAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % 512, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
})
}
export default asyncIteratorToStream(async function * (
diskSize,
incomingBlockSize,
blockAddressList,
blockIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${incomingBlockSize}) is not a divisor of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
throw new Error(
`Can't import file, grain size / block size ratio is > 53 (${ratio})`
)
}
const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512
const batPosition = FOOTER_SIZE + HEADER_SIZE
const firstBlockPosition = batPosition + tablePhysicalSizeBytes
const geometry = computeGeometryForSize(diskSize)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry,
FOOTER_SIZE,
DISK_TYPE_DYNAMIC
)
const header = createHeader(
maxTableEntries,
batPosition,
VHD_BLOCK_SIZE_BYTES
)
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
createBAT(firstBlockPosition, blockAddressList, ratio, bat, bitmapSize)
let position = 0
function * yieldAndTrack (buffer, expectedPosition) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
async function * generateFileContent (blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield * yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * 512
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
currentVhdBlockIndex = batIndex
}
const blockOffset = (next.offsetBytes / 512) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + next.offsetBytes % VHD_BLOCK_SIZE_BYTES
)
}
yield * yieldAndTrack(currentBlockWithBitmap)
}
yield * yieldAndTrack(footer, 0)
yield * yieldAndTrack(header, FOOTER_SIZE)
yield * yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield * generateFileContent(blockIterator, bitmapSize, ratio)
yield * yieldAndTrack(footer)
})

View File

@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'
import Vhd from './vhd'
import {
BLOCK_UNUSED,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const resolveRelativeFromFile = (file, path) =>
resolve('/', dirname(file), path).slice(1)
export default asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockAllocationTable()
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// this is the root VHD
const rootVhd = vhds[nVhds - 1]
// data of our synthetic VHD
// TODO: set parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
parentUuid: rootVhd.header.parentUuid,
}
const bat = Buffer.allocUnsafe(vhd.batSize)
let footer = {
...vhd.footer,
dataOffset: FOOTER_SIZE,
diskType: rootVhd.footer.diskType,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil(
(header.tableOffset + bat.length) / SECTOR_SIZE
);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
// TODO: for generic usage the bitmap needs to be properly computed for each block
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
const isRootVhd = vhd === rootVhd
if (!vhd.containsBlock(iBlock)) {
if (isRootVhd) {
yield Buffer.alloc((n - i) * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, i, n)
}
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (isRootVhd) {
yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createSyntheticStream, closeFd', i, error)
})
}
}
})

View File

@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'

View File

@@ -2,25 +2,25 @@
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent } from 'promise-toolbox'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'
import LocalHandler from './remote-handlers/local'
import vhdMerge, {
chainVhd,
createReadStream,
Vhd,
VHD_SECTOR_SIZE,
} from './vhd-merge'
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'
const initialDir = process.cwd()
jest.setTimeout(10000)
jest.setTimeout(60000)
beforeEach(async () => {
const dir = await tmpDir()
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
@@ -57,11 +57,11 @@ test('blocks can be moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -70,20 +70,18 @@ test('blocks can be moved', async () => {
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb =>
randomBytes(VHD_SECTOR_SIZE, cb)
)
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
// we want bit 31 to be set, to prove it is not used as a sign bit
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
// we recover the data manually for speed reasons.
@@ -93,7 +91,7 @@ test('the BAT MSB is not used for sign', async () => {
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
await vhd2.readHeaderAndFooter()
await vhd2.readBlockTable()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
const entry = vhd._getBatEntry(i)
if (entry !== 0xffffffff) {
@@ -110,7 +108,7 @@ test('the BAT MSB is not used for sign', async () => {
} finally {
fs.close(recoveredFile)
}
const recovered = await streamToBuffer(
const recovered = await getStream.buffer(
await fs.createReadStream('recovered', {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
@@ -124,11 +122,11 @@ test('writeData on empty file', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
@@ -139,11 +137,11 @@ test('writeData in 2 non-overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
await newVhd.writeData(
@@ -159,11 +157,11 @@ test('writeData in 2 overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
const startSecondWrite = 2
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
@@ -179,11 +177,11 @@ test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -203,7 +201,7 @@ test('coalesce works with empty parent files', async () => {
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
@@ -226,11 +224,11 @@ test('coalesce works in normal cases', async () => {
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
@@ -242,7 +240,7 @@ test('coalesce works in normal cases', async () => {
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
@@ -261,7 +259,7 @@ test('coalesce works in normal cases', async () => {
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
@@ -270,15 +268,16 @@ test('coalesce works in normal cases', async () => {
)
})
test('createReadStream passes vhd-util check', async () => {
test('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const stream = createReadStream(handler, 'randomfile.vhd')
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
)
await checkFile('recovered.vhd')
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})

View File

@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(
parentDiskType === DISK_TYPE_DIFFERENCING ||
parentDiskType === DISK_TYPE_DYNAMIC
)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
// Read allocation table of child/parent.
await Promise.all([
parentVhd.readBlockAllocationTable(),
childVhd.readBlockAllocationTable(),
])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
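A hedged usage sketch for merge() above (handler and paths are illustrative): it coalesces every block of the child into the parent and resolves with the number of bytes merged.
import { getHandler } from '@xen-orchestra/fs'
import mergeVhd from './merge'

const handler = getHandler({ url: 'file:///tmp/vhds' })
const mergedBytes = await mergeVhd(handler, 'parent.vhd', handler, 'child.vhd')
console.log(`merged ${mergedBytes} bytes into parent.vhd`)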

View File

@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'
import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {
cylinders: 3,
heads: 4,
sectorsPerTrack: 17,
})
})
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
offsetBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
const fileSize = 1000
const stream = createReadableRawVHDStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
offsetBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
return expect(
new Promise((resolve, reject) => {
const stream = createReadableRawVHDStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
})
).rejects.toThrow('Received out of order blocks')
})
test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
offsetBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
offsetBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
const fileSize = blockSize * 110
const stream = createReadableSparseVHDStream(
fileSize,
blockSize,
blocks.map(b => b.offsetBytes),
blocks
)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
])
const out1 = await readFile('out1.raw')
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

View File

@@ -1,18 +1,30 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import concurrency from 'limit-concurrency-decorator'
import fu from 'struct-fu'
import { dirname, relative } from 'path'
import getStream from 'get-stream'
import { fromEvent } from 'promise-toolbox'
import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
SECTOR_SIZE,
} from './_constants'
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//
@@ -24,160 +36,12 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
//
// ===================================================================
// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
export const VHD_SECTOR_SIZE = 512
// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4
const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0
// Types of backup treated. Others are not supported.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
export const PLATFORM_NONE = 0
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
// Other.
const BLOCK_UNUSED = 0xffffffff
const BIT_MASK = 0x80
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64('dataOffset'), // offset of the header, should always be 512
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.char('reserved', 426), // 86
])
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.uint8('dataOffsetUnused', 8),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
VHD_PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
// ===================================================================
// Helpers
// ===================================================================
const computeBatSize = entries =>
sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))
// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
// Check/Set a bit on a vhd map.
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
const mapSetBit = (map, bit) => {
map[bit >> 3] |= BIT_MASK >> (bit & 7)
}
const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// ===================================================================
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
@@ -187,6 +51,10 @@ const assertChecksum = (name, buf, struct) => {
}
}
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
// Format:
@@ -210,7 +78,8 @@ const assertChecksum = (name, buf, struct) => {
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export class Vhd {
export default class Vhd {
get batSize () {
return computeBatSize(this.header.maxTableEntries)
}
@@ -232,7 +101,12 @@ export class Vhd {
}
_read (start, n) {
return this._readStream(start, n).then(streamToBuffer)
return this._readStream(start, n)
.then(getStream.buffer)
.then(buf => {
assert.equal(buf.length, n)
return buf
})
}
containsBlock (id) {
@@ -243,15 +117,15 @@ export class Vhd {
getEndOfHeaders () {
const { header } = this
let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
let end = FOOTER_SIZE + HEADER_SIZE
// Max(end, block allocation table end)
end = Math.max(end, header.tableOffset + this.batSize)
for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
if (entry.platformCode !== PLATFORM_NONE) {
end = Math.max(
end,
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
@@ -266,7 +140,7 @@ export class Vhd {
// Returns the first sector after data.
getEndOfData () {
let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
@@ -283,25 +157,46 @@ export class Vhd {
return sectorsToBytes(end)
}
// Get the beginning (footer + header) of a vhd file.
async readHeaderAndFooter () {
const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
const bufHeader = buf.slice(VHD_FOOTER_SIZE)
// TODO: extract the checks into reusable functions:
// - better human reporting
// - auto repair if possible
async readHeaderAndFooter (checkSecondFooter = true) {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
const bufFooter = buf.slice(0, FOOTER_SIZE)
const bufHeader = buf.slice(FOOTER_SIZE)
assertChecksum('footer', bufFooter, fuFooter)
assertChecksum('header', bufHeader, fuHeader)
if (checkSecondFooter) {
const size = await this._handler.getSize(this._path)
assert(
bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
'footer1 !== footer2'
)
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
const header = (this.header = fuHeader.unpack(bufHeader))
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
header.blockSize / VHD_SECTOR_SIZE
))
const sectorsPerBlock = (this.sectorsPerBlock =
header.blockSize / SECTOR_SIZE)
// Compute bitmap size in sectors.
// Default: 1.
@@ -317,23 +212,18 @@ export class Vhd {
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
}
// Check if a vhd object has a block allocation table.
hasBlockAllocationTableMap () {
return this.footer.fileFormatVersion > getVhdVersion(1, 0)
}
// Returns a buffer that contains the block allocation table of a vhd file.
async readBlockTable () {
async readBlockAllocationTable () {
const { header } = this
this.blockTable = await this._read(
header.tableOffset,
header.maxTableEntries * VHD_ENTRY_SIZE
header.maxTableEntries * 4
)
}
// return the first sector (bitmap) of a block
_getBatEntry (block) {
return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
return this.blockTable.readUInt32BE(block * 4)
}
_readBlock (blockId, onlyBitmap = false) {
@@ -350,11 +240,11 @@ export class Vhd {
onlyBitmap
? { id: blockId, bitmap: buf }
: {
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
)
}
@@ -371,7 +261,7 @@ export class Vhd {
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += VHD_ENTRY_SIZE
j += 4
if (i === n) {
const error = new Error('no allocated block found')
@@ -395,7 +285,7 @@ export class Vhd {
}
i += 1
j += VHD_ENTRY_SIZE
j += 4
}
return { first, firstSector, last, lastSector }
@@ -419,9 +309,9 @@ export class Vhd {
})
return Buffer.isBuffer(data)
? new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
stream.on('error', reject)
stream.end(data, resolve)
})
: fromEvent(data.pipe(stream), 'finish')
}
@@ -431,7 +321,7 @@ export class Vhd {
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
)
if (
tableOffset + batSize + spaceNeededBytes >=
@@ -439,7 +329,7 @@ export class Vhd {
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / VHD_SECTOR_SIZE,
lastSector + fullBlockSize / SECTOR_SIZE,
newMinSector
)
debug(
@@ -478,7 +368,7 @@ export class Vhd {
const prevBat = this.blockTable
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
debug(
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
@@ -491,21 +381,18 @@ export class Vhd {
// set the first sector (bitmap) of a block
_setBatEntry (block, blockSector) {
const i = block * VHD_ENTRY_SIZE
const i = block * 4
const { blockTable } = this
blockTable.writeUInt32BE(blockSector, i)
return this._write(
blockTable.slice(i, i + VHD_ENTRY_SIZE),
this.header.tableOffset + i
)
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async createBlock (blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
@@ -647,7 +534,7 @@ export class Vhd {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
const offset = VHD_FOOTER_SIZE
const offset = FOOTER_SIZE
debug(
`Write header at: ${offset} (checksum=${
header.checksum
@@ -657,12 +544,12 @@ export class Vhd {
}
async writeData (offsetSectors, buffer) {
const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
const endBufferSectors = offsetSectors + bufferSizeSectors
const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
await this.ensureBatSize(lastBlock)
const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
@@ -681,11 +568,11 @@ export class Vhd {
)
const startInBuffer = Math.max(
0,
(currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
(currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
)
const endInBuffer = Math.min(
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
VHD_SECTOR_SIZE,
SECTOR_SIZE,
buffer.length
)
let inputBuffer
@@ -695,7 +582,7 @@ export class Vhd {
inputBuffer = Buffer.alloc(blockSizeBytes, 0)
buffer.copy(
inputBuffer,
offsetInBlockSectors * VHD_SECTOR_SIZE,
offsetInBlockSectors * SECTOR_SIZE,
startInBuffer,
endInBuffer
)
@@ -710,10 +597,10 @@ export class Vhd {
}
async ensureSpaceForParentLocators (neededSectors) {
const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
firstLocatorOffset / VHD_SECTOR_SIZE
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
firstLocatorOffset / SECTOR_SIZE
if (currentSpace < neededSectors) {
const deltaSectors = neededSectors - currentSpace
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
@@ -722,269 +609,23 @@ export class Vhd {
}
return firstLocatorOffset
}
}
// Merge vhd child into vhd parent.
//
// Child must be a delta backup !
// Parent must be a full backup !
//
// TODO: update the identifier of the parent VHD.
export default concurrency(2)(async function vhdMerge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
// Child must be a delta.
if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
throw new Error('Unable to merge, child is not a delta backup.')
}
// Allocation table map is not yet implemented.
if (
parentVhd.hasBlockAllocationTableMap() ||
childVhd.hasBlockAllocationTableMap()
) {
throw new Error('Unsupported allocation table map.')
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
async setUniqueParentLocator (fileNameString) {
const { header } = this
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace =
dataSpaceSectors * SECTOR_SIZE
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
// returns true if the child was actually modified
export async function chainVhd (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = HARD_DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(parentName, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
await childVhd._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
dataSpaceSectors
)
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
await childVhd.writeHeader()
await childVhd.writeFooter()
return true
}
export const createReadStream = asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockTable()
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// data of our synthetic VHD
// TODO: empty parentUuid and parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: 512 + 1024,
parentUnicodeName: '',
}
const bat = Buffer.allocUnsafe(
Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
)
let footer = {
...vhd.footer,
diskType: HARD_DISK_TYPE_DYNAMIC,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock =
sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
if (!vhd.containsBlock(iBlock)) {
yield * emitBlockSectors(iVhd + 1, i, n)
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlock)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createReadStream, closeFd', i, error)
})
}
}
})
export async function readVhdMetadata (handler: RemoteHandler, path: string) {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.16.7",
"version": "0.16.9",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [

View File

@@ -143,7 +143,9 @@ export const isOpaqueRef = value =>
const RE_READ_ONLY_METHOD = /^[^.]+\.get_/
const isReadOnlyCall = (method, args) =>
args.length === 1 && isOpaqueRef(args[0]) && RE_READ_ONLY_METHOD.test(method)
args.length === 1 &&
typeof args[0] === 'string' &&
RE_READ_ONLY_METHOD.test(method)
// Prepare values before passing them to the XenAPI:
//
@@ -180,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
// -------------------------------------------------------------------
const getTaskResult = (task, onSuccess, onFailure) => {
const getTaskResult = task => {
const { status } = task
if (status === 'cancelled') {
return [onFailure(new Cancel('task canceled'))]
return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
return [onFailure(wrapError(task.error_info))]
return Promise.reject(wrapError(task.error_info))
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
return [onSuccess(task.result)]
return Promise.resolve(task.result)
}
}
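With this change getTaskResult no longer takes success/failure callbacks: it returns undefined while the task is still pending and a settled promise otherwise. A hedged consumer sketch:
const result = getTaskResult(task)
if (result !== undefined) {
  // the task is already settled: `result` is a resolved or rejected promise
  return result
}
// otherwise fall back to registering a task watcher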
@@ -244,7 +246,7 @@ export class Xapi extends EventEmitter {
objects.getKey = getKey
this._objectsByRefs = createObject(null)
this._objectsByRefs['OpaqueRef:NULL'] = null
this._objectsByRefs['OpaqueRef:NULL'] = undefined
this._taskWatchers = Object.create(null)
@@ -407,15 +409,15 @@ export class Xapi extends EventEmitter {
return this._readOnly && !isReadOnlyCall(method, args)
? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
: this._sessionCall(`Async.${method}`, args).then(taskRef => {
$cancelToken.promise.then(() => {
// TODO: do not trigger if the task is already over
this._sessionCall('task.cancel', [taskRef]).catch(noop)
})
$cancelToken.promise.then(() => {
// TODO: do not trigger if the task is already over
this._sessionCall('task.cancel', [taskRef]).catch(noop)
})
return this.watchTask(taskRef)::lastly(() => {
this._sessionCall('task.destroy', [taskRef]).catch(noop)
return this.watchTask(taskRef)::lastly(() => {
this._sessionCall('task.destroy', [taskRef]).catch(noop)
})
})
})
}
// create a task and automatically destroy it when settled
@@ -577,31 +579,31 @@ export class Xapi extends EventEmitter {
// redirection before consuming body
const promise = isStream
? doRequest({
body: '',
body: '',
// omit task_id because this request will fail on purpose
query: 'task_id' in query ? omit(query, 'task_id') : query,
// omit task_id because this request will fail on purpose
query: 'task_id' in query ? omit(query, 'task_id') : query,
maxRedirects: 0,
}).then(
response => {
response.req.abort()
return doRequest()
},
error => {
let response
if (error != null && (response = error.response) != null) {
maxRedirects: 0,
}).then(
response => {
response.req.abort()
return doRequest()
},
error => {
let response
if (error != null && (response = error.response) != null) {
response.req.abort()
const { headers: { location }, statusCode } = response
if (statusCode === 302 && location !== undefined) {
return doRequest(location)
const { headers: { location }, statusCode } = response
if (statusCode === 302 && location !== undefined) {
return doRequest(location)
}
}
}
throw error
}
)
throw error
}
)
: doRequest()
return promise.then(response => {
@@ -640,11 +642,11 @@ export class Xapi extends EventEmitter {
let watcher = watchers[ref]
if (watcher === undefined) {
// sync check if the task is already settled
const task = this.objects.all[ref]
const task = this._objectsByRefs[ref]
if (task !== undefined) {
const result = getTaskResult(task, Promise.resolve, Promise.reject)
if (result) {
return result[0]
const result = getTaskResult(task)
if (result !== undefined) {
return result
}
}
@@ -791,11 +793,12 @@ export class Xapi extends EventEmitter {
const taskWatchers = this._taskWatchers
const taskWatcher = taskWatchers[ref]
if (
taskWatcher !== undefined &&
getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
) {
delete taskWatchers[ref]
if (taskWatcher !== undefined) {
const result = getTaskResult(object)
if (result !== undefined) {
taskWatcher.resolve(result)
delete taskWatchers[ref]
}
}
}
}

View File

@@ -28,7 +28,7 @@
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "7.0.0-beta.42",
"@babel/polyfill": "7.0.0-beta.44",
"bluebird": "^3.5.1",
"chalk": "^2.2.0",
"event-to-promise": "^0.8.0",
@@ -49,10 +49,10 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/preset-flow": "7.0.0-beta.42",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.5.1",
"version": "0.5.2",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [

View File

@@ -52,6 +52,7 @@ class AuthSamlXoPlugin {
new Strategy(this._conf, async (profile, done) => {
const name = profile[this._usernameField]
if (!name) {
console.warn('xo-server-auth-saml:', profile)
done('no name found for this user')
return
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.10.0",
"version": "0.11.0",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -35,6 +35,7 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.26.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -42,6 +43,7 @@
"devDependencies": {
"babel-cli": "^6.24.1",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
@@ -56,7 +58,8 @@
},
"babel": {
"plugins": [
"lodash"
"lodash",
"transform-runtime"
],
"presets": [
[

View File

@@ -1,6 +1,6 @@
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, startCase } from 'lodash'
import { find, forEach, get, startCase } from 'lodash'
import pkg from '../package'
@@ -41,9 +41,9 @@ const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormater = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
.tz(timezone)
.format(DATE_FORMAT)
moment(timestamp)
.tz(timezone)
.format(DATE_FORMAT)
: timestamp => moment(timestamp).format(DATE_FORMAT)
const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
@@ -66,6 +66,7 @@ const logError = e => {
console.error('backup report error:', e)
}
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
const UNHEALTHY_VDI_CHAIN_MESSAGE =
@@ -94,14 +95,351 @@ class BackupReportsXoPlugin {
this._xo.removeListener('job:terminated', this._report)
}
_wrapper (status) {
return new Promise(resolve => resolve(this._listener(status))).catch(
logError
)
_wrapper (status, job, schedule) {
return new Promise(resolve =>
resolve(
job.type === 'backup'
? this._backupNgListener(status, job, schedule)
: this._listener(status, job, schedule)
)
).catch(logError)
}
async _backupNgListener (runJobId, _, { timezone }) {
const xo = this._xo
const logs = await xo.getBackupNgLogs(runJobId)
const jobLog = logs['roots'][0]
const vmsTaskLog = logs[jobLog.id]
const { reportWhen, mode } = jobLog.data || {}
if (reportWhen === 'never') {
return
}
const formatDate = createDateFormater(timezone)
const jobName = (await xo.getJob(jobLog.jobId, 'backup')).name
if (jobLog.error !== undefined) {
const [globalStatus, icon] =
jobLog.error.message === NO_VMS_MATCH_THIS_PATTERN
? ['Skipped', ICON_SKIPPED]
: ['Failure', ICON_FAILURE]
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(jobLog.start)}`,
`- **End time**: ${formatDate(jobLog.end)}`,
`- **Duration**: ${formatDuration(jobLog.duration)}`,
`- **Error**: ${jobLog.error.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${jobName} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Backup report for ${jobName} - Error : ${
jobLog.error.message
}`,
})
}
const failedVmsText = []
const skippedVmsText = []
const successfulVmsText = []
const nagiosText = []
let globalMergeSize = 0
let globalTransferSize = 0
let nFailures = 0
let nSkipped = 0
for (const vmTaskLog of vmsTaskLog || []) {
const vmTaskStatus = vmTaskLog.status
if (vmTaskStatus === 'success' && reportWhen === 'failure') {
return
}
const vmId = vmTaskLog.data.id
let vm
try {
vm = xo.getObject(vmId)
} catch (e) {}
const text = [
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
`- **Start time**: ${formatDate(vmTaskLog.start)}`,
`- **End time**: ${formatDate(vmTaskLog.end)}`,
`- **Duration**: ${formatDuration(vmTaskLog.duration)}`,
]
const failedSubTasks = []
const operationsText = []
const srsText = []
const remotesText = []
for (const subTaskLog of logs[vmTaskLog.taskId] || []) {
const { data, status, result, message } = subTaskLog
const icon =
subTaskLog.status === 'success' ? ICON_SUCCESS : ICON_FAILURE
const errorMessage = ` **Error**: ${get(result, 'message')}`
if (message === 'snapshot') {
operationsText.push(`- **Snapshot** ${icon}`)
if (status === 'failure') {
failedSubTasks.push('Snapshot')
operationsText.push('', errorMessage)
}
} else if (data.type === 'remote') {
const remoteId = data.id
const remote = await xo.getRemote(remoteId).catch(() => {})
remotesText.push(
`- **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${remoteId}) ${icon}`
)
if (status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : remoteId)
remotesText.push('', errorMessage)
}
} else {
const srId = data.id
let sr
try {
sr = xo.getObject(srId)
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, srId]
srsText.push(`- **${srName}** (${srUuid}) ${icon}`)
if (status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : srId)
srsText.push('', errorMessage)
}
}
}
if (operationsText.length !== 0) {
operationsText.unshift(`#### Operations`, '')
}
if (srsText.length !== 0) {
srsText.unshift(`#### SRs`, '')
}
if (remotesText.length !== 0) {
remotesText.unshift(`#### remotes`, '')
}
const subText = [...operationsText, '', ...srsText, '', ...remotesText]
const result = vmTaskLog.result
if (vmTaskStatus === 'failure' && result !== undefined) {
const { message } = result
if (isSkippedError(result)) {
++nSkipped
skippedVmsText.push(
...text,
`- **Reason**: ${
message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: message
}`,
''
)
nagiosText.push(
`[(Skipped) ${
vm !== undefined ? vm.name_label : 'undefined'
} : ${message} ]`
)
} else {
++nFailures
failedVmsText.push(...text, `- **Error**: ${message}`, '')
nagiosText.push(
`[(Failed) ${
vm !== undefined ? vm.name_label : 'undefined'
} : ${message} ]`
)
}
} else {
let transferSize, transferDuration, mergeSize, mergeDuration
forEach(logs[vmTaskLog.taskId], ({ taskId }) => {
if (transferSize !== undefined) {
return false
}
const transferTask = find(logs[taskId], { message: 'transfer' })
if (transferTask !== undefined) {
transferSize = transferTask.result.size
transferDuration = transferTask.end - transferTask.start
}
const mergeTask = find(logs[taskId], { message: 'merge' })
if (mergeTask !== undefined) {
mergeSize = mergeTask.result.size
mergeDuration = mergeTask.end - mergeTask.start
}
})
if (transferSize !== undefined) {
globalTransferSize += transferSize
text.push(
`- **Transfer size**: ${formatSize(transferSize)}`,
`- **Transfer speed**: ${formatSpeed(
transferSize,
transferDuration
)}`
)
}
if (mergeSize !== undefined) {
globalMergeSize += mergeSize
text.push(
`- **Merge size**: ${formatSize(mergeSize)}`,
`- **Merge speed**: ${formatSpeed(mergeSize, mergeDuration)}`
)
}
if (vmTaskStatus === 'failure') {
++nFailures
failedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[(Failed) ${
vm !== undefined ? vm.name_label : 'undefined'
}: (failed)[${failedSubTasks.toString()}]]`
)
} else {
successfulVmsText.push(...text, '', '', ...subText, '')
}
}
}
const globalSuccess = nFailures === 0 && nSkipped === 0
if (reportWhen === 'failure' && globalSuccess) {
return
}
const nVms = vmsTaskLog.length
const nSuccesses = nVms - nFailures - nSkipped
const globalStatus = globalSuccess
? `Success`
: nFailures !== 0 ? `Failure` : `Skipped`
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(jobLog.start)}`,
`- **End time**: ${formatDate(jobLog.end)}`,
`- **Duration**: ${formatDuration(jobLog.duration)}`,
`- **Successes**: ${nSuccesses} / ${nVms}`,
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
'',
`## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
'',
...failedVmsText
)
}
if (nSkipped !== 0) {
markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
}
if (nSuccesses !== 0 && reportWhen !== 'failure') {
markdown.push(
'---',
'',
`## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
'',
...successfulVmsText
)
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${jobName} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
})
}
_sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: this._mailsReceivers,
subject,
markdown,
}),
xo.sendToXmppClient !== undefined &&
xo.sendToXmppClient({
to: this._xmppReceivers,
message: markdown,
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
nagiosStatus,
message: nagiosMarkdown,
}),
])
}
_listener (status) {
const { calls } = status
const { calls, timezone, error } = status
const formatDate = createDateFormater(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =
error.message === NO_VMS_MATCH_THIS_PATTERN
? ['Skipped', ICON_SKIPPED]
: ['Failure', ICON_FAILURE]
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **Start time**: ${formatDate(status.start)}`,
`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(status.end - status.start)}`,
`- **Error**: ${error.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
error.message
}`,
})
}
const callIds = Object.keys(calls)
const nCalls = callIds.length
@@ -139,8 +477,6 @@ class BackupReportsXoPlugin {
const skippedBackupsText = []
const successfulBackupText = []
const formatDate = createDateFormater(status.timezone)
forEach(calls, call => {
const { id = call.params.vm } = call.params
@@ -226,9 +562,8 @@ class BackupReportsXoPlugin {
return
}
const { end, start } = status
const { tag } = oneCall.params
const duration = end - start
const duration = status.end - status.start
const nSuccesses = nCalls - nFailures - nSkipped
const globalStatus = globalSuccess
? `Success`
@@ -238,8 +573,8 @@ class BackupReportsXoPlugin {
`## Global status: ${globalStatus}`,
'',
`- **Type**: ${formatMethod(method)}`,
`- **Start time**: ${formatDate(start)}`,
`- **End time**: ${formatDate(end)}`,
`- **Start time**: ${formatDate(status.start)}`,
`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(duration)}`,
`- **Successes**: ${nSuccesses} / ${nCalls}`,
]
@@ -285,37 +620,20 @@ class BackupReportsXoPlugin {
markdown = markdown.join('\n')
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: this._mailsReceivers,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
markdown,
}),
xo.sendToXmppClient !== undefined &&
xo.sendToXmppClient({
to: this._xmppReceivers,
message: markdown,
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: globalSuccess ? 0 : 2,
message: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
}),
])
return this._sendReport({
markdown,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
})
}
}

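A minimal sketch of the transport fan-out that `_sendReport` factors out of both listeners: each optional transport is guarded with an `!== undefined` check, so an unconfigured transport contributes a plain `false` to `Promise.all` instead of a rejected promise. The `fakeXo` object below is illustrative, not part of xo-server.

// Sketch: only the transports the xo instance exposes are actually called.
const fakeXo = {
  sendEmail: ({ subject }) => Promise.resolve(`email sent: ${subject}`),
  // sendSlackMessage is intentionally left undefined
}

function sendReportSketch (xo, { subject, markdown }) {
  return Promise.all([
    xo.sendEmail !== undefined && xo.sendEmail({ subject, markdown }),
    xo.sendSlackMessage !== undefined &&
      xo.sendSlackMessage({ message: markdown }),
  ])
}

sendReportSketch(fakeXo, { subject: 'test report', markdown: '# hello' }).then(
  results => console.log(results) // [ 'email sent: test report', false ]
)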
View File

@@ -30,7 +30,7 @@
"node": ">=4"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.2",
"@xen-orchestra/cron": "^1.0.3",
"babel-runtime": "^6.11.6",
"lodash": "^4.16.2"
},

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-perf-alert",
"version": "0.0.0",
"version": "0.1.0",
"license": "AGPL-3.0",
"description": "",
"keywords": [],
@@ -20,16 +20,16 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.2",
"@xen-orchestra/cron": "^1.0.3",
"d3-time-format": "^2.1.1",
"json5": "^1.0.0",
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/preset-flow": "^7.0.0-beta.40",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "^7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"

View File

@@ -1,10 +1,11 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const VM_FUNCTIONS = {
cpuUsage: {
name: 'VM CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
},
},
memoryUsage: {
name: 'VM memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {
const HOST_FUNCTIONS = {
cpuUsage: {
name: 'host CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
},
},
memoryUsage: {
name: 'host memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
)
},
getDisplayableValue,
shouldAlarm: () => {
return getDisplayableValue() > threshold
},
shouldAlarm: () => getDisplayableValue() > threshold,
}
},
},
}
const SR_FUNCTIONS = {
storageUsage: {
name: 'SR storage usage',
description:
'Raises an alarm when the used disk space % is higher than the threshold',
unit: '% used',
comparator: '>',
createGetter: threshold => sr => {
const getDisplayableValue = () =>
sr.physical_utilisation * 100 / sr.physical_size
return {
getDisplayableValue,
shouldAlarm: () => getDisplayableValue() > threshold,
}
},
},
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
const TYPE_FUNCTION_MAP = {
vm: VM_FUNCTIONS,
host: HOST_FUNCTIONS,
sr: SR_FUNCTIONS,
}
// list of currently ringing alarms, to avoid double notification
@@ -229,11 +250,52 @@ export const configurationSchema = {
required: ['uuids'],
},
},
srMonitors: {
type: 'array',
title: 'SR Monitors',
description:
'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
'The average is compared to the threshold and an alarm is raised upon crossing',
items: {
type: 'object',
properties: {
uuids: {
title: 'SRs',
type: 'array',
items: {
type: 'string',
$type: 'SR',
},
},
variableName: {
title: 'Alarm Type',
description: Object.keys(SR_FUNCTIONS)
.map(
k =>
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
SR_FUNCTIONS[k].description
}`
)
.join('\n'),
type: 'string',
default: Object.keys(SR_FUNCTIONS)[0],
enum: Object.keys(SR_FUNCTIONS),
},
alarmTriggerLevel: {
title: 'Threshold',
description:
'The direction of the crossing is given by the Alarm type',
type: 'number',
default: 80,
},
},
required: ['uuids'],
},
},
toEmails: {
type: 'array',
title: 'Email addresses',
description: 'Email addresses of the alert recipients',
items: {
type: 'string',
},
@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
currentAlarms[alarmId] = true
raiseCallback(alarmId)
}
} else {
if (current) {
try {
lowerCallback(alarmId)
} finally {
delete currentAlarms[alarmId]
}
} else if (current) {
try {
lowerCallback(alarmId)
} finally {
delete currentAlarms[alarmId]
}
}
}
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
clearCurrentAlarms()
}
load () {
this._job.start()
}
unload () {
this._job.stop()
}
_generateUrl (type, object) {
const map = {
vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
const { baseUrl } = this._configuration
const { uuid } = object
switch (type) {
case 'vm':
return `${baseUrl}#/vms/${uuid}/stats`
case 'host':
return `${baseUrl}#/hosts/${uuid}/stats`
case 'sr':
return `${baseUrl}#/srs/${uuid}/general`
default:
return 'unknown type'
}
return map[type]()
}
async test () {
const hostMonitorPart2 = await Promise.all(
map(this._getMonitors(), async m => {
const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
return `
const monitorBodies = await Promise.all(
map(
this._getMonitors(),
async m => `
## Monitor for ${m.title}
${m.tableHeader}
${tableBody.join('')}`
})
${(await m.snapshot()).map(entry => entry.listItem).join('')}`
)
)
this._sendAlertEmail(
@@ -322,18 +396,10 @@ ${tableBody.join('')}`
`
# Performance Alert Test
Your alarms and their current status:
${hostMonitorPart2.join('\n')}`
${monitorBodies.join('\n')}`
)
}
load () {
this._job.start()
}
unload () {
this._job.stop()
}
_parseDefinition (definition) {
const alarmId = `${definition.objectType}|${definition.variableName}|${
definition.alarmTriggerLevel
@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
definition.alarmTriggerPeriod !== undefined
? definition.alarmTriggerPeriod
: 60
const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
return {
...definition,
alarmId,
vmFunction: typeFunction,
title: `${typeText} ${definition.variableName} ${
typeFunction.comparator
} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
title: `${typeFunction.name} ${typeFunction.comparator} ${
definition.alarmTriggerLevel
}${typeFunction.unit}`,
snapshot: async () => {
return Promise.all(
map(definition.uuids, async uuid => {
try {
const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
const objectLink = `[${
monitoredObject.name_label
}](${this._generateUrl(definition.objectType, monitoredObject)})`
const rrd = await this.getRrd(monitoredObject, observationPeriod)
const couldFindRRD = rrd !== null
const result = {
object: monitoredObject,
couldFindRRD,
objectLink: objectLink,
listItem: ` * ${typeText} ${objectLink} ${
definition.variableName
}: **Can't read performance counters**\n`,
tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
uuid,
name: definition.name,
object: this._xo.getXapi(uuid).getObject(uuid),
}
if (!couldFindRRD) {
return result
if (result.object === undefined) {
throw new Error('object not found')
}
const data = parseData(rrd, monitoredObject.uuid)
const textValue =
data.getDisplayableValue().toFixed(1) + typeFunction.unit
const shouldAlarm = data.shouldAlarm()
return {
...result,
value: data.getDisplayableValue(),
shouldAlarm: shouldAlarm,
textValue: textValue,
listItem: ` * ${typeText} ${objectLink} ${
definition.variableName
}: ${textValue}\n`,
tableItem: `${objectLink} | ${textValue} | ${
shouldAlarm ? '**Alert Ongoing**' : 'no alert'
}\n`,
result.objectLink = `[${
result.object.name_label
}](${this._generateUrl(definition.objectType, result.object)})`
if (typeFunction.createGetter === undefined) {
// Stats via RRD
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
}
} else {
// Stats via XAPI
const getter = typeFunction.createGetter(
definition.alarmTriggerLevel
)
const data = getter(result.object)
assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
}
result.listItem = ` * ${result.objectLink}: ${
result.value === undefined
? "**Can't read performance counters**"
: result.value.toFixed(1) + typeFunction.unit
}\n`
return result
} catch (_) {
return {
uuid,
object: null,
couldFindRRD: false,
objectLink: `cannot find object ${uuid}`,
listItem: ` * ${typeText} ${uuid} ${
definition.variableName
}: **Can't read performance counters**\n`,
tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
listItem: ` * ${uuid}: **Can't read performance counters**\n`,
}
}
})
@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
_getMonitors () {
return map(this._configuration.hostMonitors, def =>
this._parseDefinition({ ...def, objectType: 'host' })
).concat(
map(this._configuration.vmMonitors, def =>
this._parseDefinition({ ...def, objectType: 'vm' })
)
)
.concat(
map(this._configuration.vmMonitors, def =>
this._parseDefinition({ ...def, objectType: 'vm' })
)
)
.concat(
map(this._configuration.srMonitors, def =>
this._parseDefinition({ ...def, objectType: 'sr' })
)
)
}
async _checkMonitors () {
@@ -466,7 +542,7 @@ ${hostMonitorPart2.join('\n')}`
for (const entry of snapshot) {
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}|RRD`,
!entry.couldFindRRD,
entry.value === undefined,
() => {
this._sendAlertEmail(
'Secondary Issue',
@@ -477,9 +553,11 @@ ${entry.listItem}`
},
() => {}
)
if (!entry.couldFindRRD) {
if (entry.value === undefined) {
continue
}
const raiseAlarm = alarmId => {
// sample XenCenter message:
// value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>
@@ -500,23 +578,24 @@ ${entry.listItem}`
this._sendAlertEmail(
'',
`
## ALERT ${monitor.title}
## ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
const lowerAlarm = alarmId => {
console.log('lowering Alarm', alarmId)
this._sendAlertEmail(
'END OF ALERT',
`
## END OF ALERT ${monitor.title}
## END OF ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}`,
entry.shouldAlarm,

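Unlike the host and VM monitors, the new SR monitor never touches RRDs: `createGetter` closes over the threshold and reads `physical_utilisation` / `physical_size` directly from the XAPI SR record. A sketch of that evaluation path with a made-up SR object:

// Sketch: evaluating an SR storage-usage alarm (values are illustrative).
const storageUsage = {
  unit: '% used',
  createGetter: threshold => sr => {
    const getDisplayableValue = () =>
      sr.physical_utilisation * 100 / sr.physical_size
    return {
      getDisplayableValue,
      shouldAlarm: () => getDisplayableValue() > threshold,
    }
  },
}

const sr = { physical_utilisation: 850e9, physical_size: 1e12 } // hypothetical SR
const check = storageUsage.createGetter(80)(sr)
console.log(check.getDisplayableValue().toFixed(1) + storageUsage.unit) // 85.0% used
console.log(check.shouldAlarm()) // true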
View File

@@ -58,7 +58,8 @@ export const configurationSchema = {
},
port: {
type: 'integer',
description: 'port of the SMTP server (defaults to 25 or 465 for TLS)',
description:
'port of the SMTP server (defaults to 25 or 465 for TLS)',
},
secure: {
default: false,

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.4.0",
"version": "0.4.2",
"license": "AGPL-3.0",
"description": "",
"keywords": [
@@ -34,7 +34,7 @@
"node": ">=4"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.2",
"@xen-orchestra/cron": "^1.0.3",
"babel-runtime": "^6.23.0",
"handlebars": "^4.0.6",
"html-minifier": "^3.5.8",

View File

@@ -159,30 +159,30 @@
</tr>
<tr>
<td>CPU:</td>
<td>{{global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<td>{{normaliseValue global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<tr>
<tr>
<td>RAM:</td>
<td>{{global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<td>{{normaliseValue global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<tr>
<tr>
<td>Disk read:</td>
<td>{{global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
<td>{{normaliseValue global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
</td>
<tr>
<tr>
<td>Disk write:</td>
<td>{{global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
<td>{{normaliseValue global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
</td>
<tr>
<tr>
<td>Network RX:</td>
<td>{{global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
<td>{{normaliseValue global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
<td>{{global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
<td>{{normaliseValue global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -205,7 +205,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} %</td>
<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
@@ -216,7 +216,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -226,7 +226,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} MiB</td>
<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -236,7 +236,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} MiB</td>
<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -246,7 +246,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -256,7 +256,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -275,28 +275,28 @@
</tr>
<tr>
<td>CPU:</td>
<td>{{global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
<td>{{normaliseValue global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
</td>
<tr>
<tr>
<td>RAM:</td>
<td>{{global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
<td>{{normaliseValue global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
</td>
</td>
<tr>
<tr>
<td>Load average:</td>
<td>{{global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
<td>{{normaliseValue global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
</td>
<tr>
<tr>
<td>Network RX:</td>
<td>{{global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
<td>{{normaliseValue global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
<td>{{global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
<td>{{normaliseValue global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -318,7 +318,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} %</td>
<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
<tr>
@@ -328,7 +328,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -338,7 +338,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} </td>
<td>{{normaliseValue this.value}} </td>
</tr>
{{/each}}
<tr>
@@ -348,7 +348,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -358,7 +358,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -378,7 +378,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
</table>

View File

@@ -8,6 +8,7 @@ import {
filter,
find,
forEach,
get,
isFinite,
map,
orderBy,
@@ -137,7 +138,7 @@ Handlebars.registerHelper(
value =>
new Handlebars.SafeString(
isFinite(+value) && +value !== 0
? value > 0
? (value = round(value, 2)) > 0
? `(<b style="color: green;">▲ ${value}</b>)`
: `(<b style="color: red;">▼ ${String(value).slice(1)}</b>)`
: ''
@@ -159,12 +160,12 @@ function computeMean (values) {
return sum / n
}
const computeDoubleMean = val => computeMean(val.map(computeMean))
const computeDoubleMean = val => computeMean(map(val, computeMean))
function computeMeans (objects, options) {
return zipObject(
options,
map(options, opt => round(computeMean(map(objects, opt)), 2))
map(options, opt => computeMean(map(objects, opt)), 2)
)
}
@@ -185,7 +186,7 @@ function getTop (objects, options) {
obj => ({
uuid: obj.uuid,
name: obj.name,
value: round(obj[opt], 2),
value: obj[opt],
})
)
)
@@ -200,7 +201,7 @@ function computePercentage (curr, prev, options) {
opt =>
prev[opt] === 0 || prev[opt] === null
? 'NONE'
: `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
: `${(curr[opt] - prev[opt]) * 100 / prev[opt]}`
)
)
}
@@ -212,6 +213,10 @@ function getDiff (oldElements, newElements) {
}
}
function getMemoryUsedMetric ({ memory, memoryFree = memory }) {
return map(memory, (value, key) => value - memoryFree[key])
}
// ===================================================================
async function getVmsStats ({ runningVms, xo }) {
@@ -223,11 +228,15 @@ async function getVmsStats ({ runningVms, xo }) {
uuid: vm.uuid,
name: vm.name_label,
cpu: computeDoubleMean(vmStats.stats.cpus),
ram: computeMean(vmStats.stats.memoryUsed) / gibPower,
diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
diskRead:
computeDoubleMean(values(get(vmStats.stats.xvds, 'r'))) / mibPower,
diskWrite:
computeDoubleMean(values(get(vmStats.stats.xvds, 'w'))) / mibPower,
netReception:
computeDoubleMean(get(vmStats.stats.vifs, 'rx')) / kibPower,
netTransmission:
computeDoubleMean(get(vmStats.stats.vifs, 'tx')) / kibPower,
}
})
),
@@ -245,11 +254,12 @@ async function getHostsStats ({ runningHosts, xo }) {
uuid: host.uuid,
name: host.name_label,
cpu: computeDoubleMean(hostStats.stats.cpus),
ram: computeMean(hostStats.stats.memoryUsed) / gibPower,
ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
load: computeMean(hostStats.stats.load),
netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
netReception:
computeDoubleMean(get(hostStats.stats.pifs, 'rx')) / kibPower,
netTransmission:
computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
computeDoubleMean(get(hostStats.stats.pifs, 'tx')) / kibPower,
}
})
),

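`getMemoryUsedMetric` replaces the `memoryUsed` series that no longer exists in the stats payload: it derives usage from the total and free series, and when `memoryFree` is missing it defaults to `memory` itself, producing zeros rather than NaN. A small sketch with made-up sample arrays:

import { map } from 'lodash'

// Derive a "memory used" series (bytes) from total and free series.
function getMemoryUsedMetric ({ memory, memoryFree = memory }) {
  return map(memory, (value, key) => value - memoryFree[key])
}

// Hypothetical 3-sample series:
console.log(
  getMemoryUsedMetric({
    memory: [4e9, 4e9, 4e9],
    memoryFree: [1e9, 2e9, 1.5e9],
  })
) // [ 3000000000, 2000000000, 2500000000 ]

// Guest without the memory_internal_free metric: free defaults to total.
console.log(getMemoryUsedMetric({ memory: [4e9, 4e9] })) // [ 0, 0 ]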
View File

@@ -8,12 +8,14 @@ try {
const filtered = frames.filter(function (frame) {
const name = frame && frame.getFileName()
return (// has a filename
return (
// has a filename
name &&
// contains a separator (no internal modules)
name.indexOf(sep) !== -1 &&
// does not start with `internal`
name.lastIndexOf('internal', 0) !== -1)
name.lastIndexOf('internal', 0) !== -1
)
})
// depd (used amongst other by express requires at least 3 frames

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "5.17.4",
"version": "5.19.6",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -31,9 +31,10 @@
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "7.0.0-beta.42",
"@babel/polyfill": "7.0.0-beta.44",
"@marsaud/smb2-promise": "^0.2.1",
"@xen-orchestra/cron": "^1.0.2",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/fs": "^0.0.0",
"ajv": "^6.1.1",
"app-conf": "^0.5.0",
"archiver": "^2.1.0",
@@ -59,6 +60,7 @@
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.6.2",
"helmet": "^3.9.0",
@@ -109,27 +111,29 @@
"tmp": "^0.0.33",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.0.0",
"ws": "^5.0.0",
"xen-api": "^0.16.7",
"xen-api": "^0.16.9",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.2.3",
"xo-collection": "^0.4.1",
"xo-common": "^0.1.1",
"xo-remote-parser": "^0.3",
"xo-vmdk-to-vhd": "0.0.12"
"xo-vmdk-to-vhd": "^0.1.1",
"yazl": "^2.4.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.42",
"@babel/core": "7.0.0-beta.42",
"@babel/plugin-proposal-decorators": "7.0.0-beta.42",
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.42",
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.42",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.42",
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.40",
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.40",
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.40",
"@babel/preset-env": "7.0.0-beta.42",
"@babel/preset-flow": "7.0.0-beta.42",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-proposal-decorators": "7.0.0-beta.44",
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.44",
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.44",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.44",
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.44",
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",

View File

@@ -1,3 +1,7 @@
import { basename } from 'path'
import { safeDateFormat } from '../utils'
export function createJob ({ schedules, ...job }) {
job.userId = this.user.id
return this.createBackupNgJob(job, schedules)
@@ -27,6 +31,10 @@ createJob.params = {
settings: {
type: 'object',
},
srs: {
type: 'object',
optional: true,
},
vms: {
type: 'object',
},
@@ -81,6 +89,10 @@ editJob.params = {
type: 'object',
optional: true,
},
srs: {
type: 'object',
optional: true,
},
vms: {
type: 'object',
optional: true,
@@ -122,6 +134,14 @@ runJob.params = {
// -----------------------------------------------------------------------------
export function getAllLogs () {
return this.getBackupNgLogs()
}
getAllLogs.permission = 'admin'
// -----------------------------------------------------------------------------
export function deleteVmBackup ({ id }) {
return this.deleteVmBackupNg(id)
}
@@ -163,3 +183,88 @@ importVmBackup.params = {
type: 'string',
},
}
// -----------------------------------------------------------------------------
export function listPartitions ({ remote, disk }) {
return this.listBackupNgDiskPartitions(remote, disk)
}
listPartitions.permission = 'admin'
listPartitions.params = {
disk: {
type: 'string',
},
remote: {
type: 'string',
},
}
export function listFiles ({ remote, disk, partition, path }) {
return this.listBackupNgPartitionFiles(remote, disk, partition, path)
}
listFiles.permission = 'admin'
listFiles.params = {
disk: {
type: 'string',
},
partition: {
type: 'string',
optional: true,
},
path: {
type: 'string',
},
remote: {
type: 'string',
},
}
async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
const zipStream = await this.fetchBackupNgPartitionFiles(
remote,
disk,
partition,
paths
)
res.setHeader('content-disposition', 'attachment')
res.setHeader('content-type', 'application/octet-stream')
return zipStream
}
export async function fetchFiles (params) {
const { paths } = params
let filename = `restore_${safeDateFormat(new Date())}`
if (paths.length === 1) {
filename += `_${basename(paths[0])}`
}
filename += '.zip'
return this.registerHttpRequest(handleFetchFiles, params, {
suffix: encodeURI(`/${filename}`),
}).then(url => ({ $getFrom: url }))
}
fetchFiles.permission = 'admin'
fetchFiles.params = {
disk: {
type: 'string',
},
partition: {
optional: true,
type: 'string',
},
paths: {
items: { type: 'string' },
minLength: 1,
type: 'array',
},
remote: {
type: 'string',
},
}

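`fetchFiles` registers a one-shot HTTP handler that streams a zip of the requested files and returns its URL to the client as `$getFrom`; the attachment name carries a timestamp and, when a single path is requested, that file's basename. A sketch of just the filename logic (the `safeDateFormat` stand-in below is an assumption, not xo-server's helper):

import { basename } from 'path'

// Illustrative stand-in for xo-server's safeDateFormat helper.
const safeDateFormat = date => date.toISOString().replace(/[-:.]/g, '')

function buildRestoreFilename (paths) {
  let filename = `restore_${safeDateFormat(new Date())}`
  if (paths.length === 1) {
    // a single file keeps its name in the attachment
    filename += `_${basename(paths[0])}`
  }
  return filename + '.zip'
}

console.log(buildRestoreFilename(['/etc/hostname']))
// e.g. restore_20180518T164026123Z_hostname.zip
console.log(buildRestoreFilename(['/etc/hostname', '/etc/hosts']))
// e.g. restore_20180518T164026123Z.zip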
View File

@@ -242,7 +242,7 @@ emergencyShutdownHost.resolve = {
// -------------------------------------------------------------------
export function stats ({ host, granularity }) {
return this.getXapiHostStats(host, granularity)
return this.getXapiHostStats(host._xapiId, granularity)
}
stats.description = 'returns statistic of the host'

View File

@@ -1,19 +1,5 @@
export async function get ({ namespace }) {
const logger = await this.getLogger(namespace)
return new Promise((resolve, reject) => {
const logs = {}
logger
.createReadStream()
.on('data', data => {
logs[data.key] = data.value
})
.on('end', () => {
resolve(logs)
})
.on('error', reject)
})
export function get ({ namespace }) {
return this.getLogs(namespace)
}
get.description = 'returns logs list for one namespace'

View File

@@ -189,6 +189,7 @@ export async function createNfs ({
server,
serverPath,
nfsVersion,
nfsOptions,
}) {
const xapi = this.getXapi(host)
@@ -202,6 +203,11 @@ export async function createNfs ({
deviceConfig.nfsversion = nfsVersion
}
// if NFS options given
if (nfsOptions) {
deviceConfig.options = nfsOptions
}
const srRef = await xapi.call(
'SR.create',
host._xapiRef,
@@ -226,6 +232,7 @@ createNfs.params = {
server: { type: 'string' },
serverPath: { type: 'string' },
nfsVersion: { type: 'string', optional: true },
nfsOptions: { type: 'string', optional: true },
}
createNfs.resolve = {
@@ -241,7 +248,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
const xapi = this.getXapi(host)
const deviceConfig = {
scsiId,
SCSIid: scsiId,
}
const srRef = await xapi.call(
@@ -251,7 +258,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
'0',
nameLabel,
nameDescription,
'lvmoohba', // SR LVM over HBA
'lvmohba', // SR LVM over HBA
'user', // recommended by Citrix
true,
{}
@@ -366,7 +373,7 @@ export async function probeHba ({ host }) {
let xml
try {
await xapi.call('SR.probe', host._xapiRef, 'type', {})
await xapi.call('SR.probe', host._xapiRef, {}, 'lvmohba', {})
throw new Error('the call above should have thrown an error')
} catch (error) {
@@ -382,7 +389,7 @@ export async function probeHba ({ host }) {
hbaDevices.push({
hba: hbaDevice.hba.trim(),
path: hbaDevice.path.trim(),
scsciId: hbaDevice.SCSIid.trim(),
scsiId: hbaDevice.SCSIid.trim(),
size: hbaDevice.size.trim(),
vendor: hbaDevice.vendor.trim(),
})
@@ -487,8 +494,8 @@ export async function probeIscsiIqns ({
// if we give user and password
if (chapUser && chapPassword) {
deviceConfig.chapUser = chapUser
deviceConfig.chapPassword = chapPassword
deviceConfig.chapuser = chapUser
deviceConfig.chappassword = chapPassword
}
// if we give another port than default iSCSI
@@ -668,6 +675,34 @@ probeIscsiExists.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect if this HBA already exists in XAPI
// It returns a list of SR UUIDs, empty if there are no existing connections
export async function probeHbaExists ({ host, scsiId }) {
const xapi = this.getXapi(host)
const deviceConfig = {
SCSIid: scsiId,
}
const xml = parseXml(
await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'lvmohba', {})
)
// get the UUID of SR connected to this LUN
return ensureArray(xml.SRlist.SR).map(sr => ({ uuid: sr.UUID.trim() }))
}
probeHbaExists.params = {
host: { type: 'string' },
scsiId: { type: 'string' },
}
probeHbaExists.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect if this NFS SR already exists in XAPI
// It returns a list of SR UUIDs, empty if there are no existing connections
@@ -803,3 +838,23 @@ getUnhealthyVdiChainsLength.params = {
getUnhealthyVdiChainsLength.resolve = {
sr: ['id', 'SR', 'operate'],
}
// -------------------------------------------------------------------
export function stats ({ sr, granularity }) {
return this.getXapiSrStats(sr._xapiId, granularity)
}
stats.description = 'returns statistic of the sr'
stats.params = {
id: { type: 'string' },
granularity: {
type: 'string',
optional: true,
},
}
stats.resolve = {
sr: ['id', 'SR', 'view'],
}

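The NFS fix comes down to how the `device-config` map is assembled before `SR.create`: the version and extra mount options are only set when provided, under the lowercase keys the NFS SR driver expects. A hedged sketch of that assembly (the `server` / `serverpath` base keys are assumed from context):

// Sketch: device-config for an NFS SR, with optional version and options.
function buildNfsDeviceConfig ({ server, serverPath, nfsVersion, nfsOptions }) {
  const deviceConfig = { server, serverpath: serverPath }
  if (nfsVersion) {
    // if a NFS version is given
    deviceConfig.nfsversion = nfsVersion // e.g. '4' or '4.1'
  }
  if (nfsOptions) {
    // if NFS options are given
    deviceConfig.options = nfsOptions // e.g. 'noatime,nodiratime'
  }
  return deviceConfig
}

console.log(
  buildNfsDeviceConfig({
    server: '192.0.2.10',
    serverPath: '/exports/xo',
    nfsVersion: '4',
  })
)
// { server: '192.0.2.10', serverpath: '/exports/xo', nfsversion: '4' }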
View File

@@ -12,6 +12,10 @@ import { forEach, map, mapFilter, parseSize } from '../utils'
// ===================================================================
export function getHaValues () {
return ['best-effort', 'restart', '']
}
function checkPermissionOnSrs (vm, permission = 'operate') {
const permissions = []
forEach(vm.$VBDs, vbdId => {
@@ -151,10 +155,10 @@ export async function create (params) {
await Promise.all([
params.share
? Promise.all(
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
this.addAcl(subjectId, vm.id, 'admin')
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
this.addAcl(subjectId, vm.id, 'admin')
)
)
)
: this.addAcl(user.id, vm.id, 'admin'),
xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet),
])
@@ -323,6 +327,7 @@ create.resolve = {
async function delete_ ({
delete_disks, // eslint-disable-line camelcase
force,
forceDeleteDefaultTemplate,
vm,
deleteDisks = delete_disks,
@@ -363,7 +368,12 @@ async function delete_ ({
;this.setVmResourceSet(vm._xapiId, null)::ignoreErrors()
}
return xapi.deleteVm(vm._xapiId, deleteDisks, force)
return xapi.deleteVm(
vm._xapiId,
deleteDisks,
force,
forceDeleteDefaultTemplate
)
}
delete_.params = {
@@ -378,6 +388,11 @@ delete_.params = {
optional: true,
type: 'boolean',
},
forceDeleteDefaultTemplate: {
optional: true,
type: 'boolean',
},
}
delete_.resolve = {
vm: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate'],
@@ -545,11 +560,11 @@ set.params = {
name_description: { type: 'string', optional: true },
// TODO: provides better filtering of values for HA possible values: "best-
// effort" meaning "try to restart this VM if possible but don't consider the
// Pool to be overcommitted if this is not possible"; "restart" meaning "this
// VM should be restarted"; "" meaning "do not try to restart this VM"
high_availability: { type: 'boolean', optional: true },
high_availability: {
optional: true,
pattern: new RegExp(`^(${getHaValues().join('|')})$`),
type: 'string',
},
// Number of virtual CPUs to allocate.
CPUs: { type: 'integer', optional: true },
@@ -586,6 +601,9 @@ set.params = {
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
// Emulate HVM C000 PCI device for Windows Update to fetch or update PV drivers
hasVendorDevice: { type: 'boolean', optional: true },
// Move the vm In to/Out of Self Service
resourceSet: { type: ['string', 'null'], optional: true },
@@ -1340,7 +1358,7 @@ detachPci.resolve = {
// -------------------------------------------------------------------
export function stats ({ vm, granularity }) {
return this.getXapiVmStats(vm, granularity)
return this.getXapiVmStats(vm._xapiId, granularity)
}
stats.description = 'returns statistics about the VM'

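`high_availability` moves from a boolean to one of the strings returned by `getHaValues`, and `vm.set` validates it with a regexp built from that list, the empty string meaning "do not try to restart this VM". A quick sketch of what that pattern accepts:

function getHaValues () {
  return ['best-effort', 'restart', '']
}

// Same construction as the set.params validation: an anchored alternation.
const haPattern = new RegExp(`^(${getHaValues().join('|')})$`)

console.log(haPattern.test('restart')) // true
console.log(haPattern.test('best-effort')) // true
console.log(haPattern.test('')) // true, HA explicitly disabled
console.log(haPattern.test('always')) // false, rejected by the API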
View File

@@ -1,7 +1,6 @@
import getStream from 'get-stream'
import { forEach } from 'lodash'
import { streamToBuffer } from '../utils'
// ===================================================================
export function clean () {
@@ -42,7 +41,9 @@ function handleGetAllObjects (req, res, { filter, limit }) {
export function getAllObjects ({ filter, limit, ndjson = false }) {
return ndjson
? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then($getFrom => ({ $getFrom }))
? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then(
$getFrom => ({ $getFrom })
)
: this.getObjects({ filter, limit })
}
@@ -59,7 +60,7 @@ getAllObjects.params = {
export async function importConfig () {
return {
$sendTo: await this.registerHttpRequest(async (req, res) => {
await this.importConfig(JSON.parse(await streamToBuffer(req)))
await this.importConfig(JSON.parse(await getStream.buffer(req)))
res.end('config successfully imported')
}),

View File

@@ -8,7 +8,7 @@ describe('debounce()', () => {
let i
class Foo {
@debounce(1e1)
@debounce(10)
foo () {
++i
}
@@ -18,22 +18,28 @@ describe('debounce()', () => {
i = 0
})
it('works', done => {
const foo = new Foo()
it('works', () => {
const savedNow = Date.now
try {
const now = Date.now()
const mockDate = jest.fn()
Date.now = mockDate
const foo = new Foo()
expect(i).toBe(0)
expect(i).toBe(0)
mockDate.mockReturnValueOnce(now)
foo.foo()
expect(i).toBe(1)
foo.foo()
expect(i).toBe(1)
mockDate.mockReturnValueOnce(now + 2)
foo.foo()
expect(i).toBe(1)
foo.foo()
expect(i).toBe(1)
setTimeout(() => {
mockDate.mockReturnValueOnce(now + 2 + 10)
foo.foo()
expect(i).toBe(2)
done()
}, 2e1)
} finally {
Date.now = savedNow
}
})
})

View File

@@ -7,6 +7,7 @@ import has from 'lodash/has'
import helmet from 'helmet'
import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
@@ -227,12 +228,12 @@ async function registerPlugin (pluginPath, pluginName) {
// instance.
const instance = isFunction(factory)
? factory({
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
: factory
await this.registerPlugin(
@@ -311,6 +312,13 @@ async function makeWebServerListen (
) {
if (cert && key) {
;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
if (opts.key.includes('ENCRYPTED')) {
opts.passphrase = await new Promise(resolve => {
console.log('Encrypted key %s', key)
process.stdout.write(`Enter pass phrase: `)
pw(resolve)
})
}
}
try {
const niceAddress = await webServer.listen(opts)

View File

@@ -1,16 +1,15 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
import { isArray, map } from 'lodash'
// ===================================================================
const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => (fields, ...args) =>
execa
.stdout(command, [
const makeFunction = command => async (fields, ...args) => {
return splitLines(
await execa.stdout(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
String(fields),
...args,
])
.then(stdout =>
map(
splitLines(stdout),
isArray(fields)
? parse
: line => {
const data = parse(line)
return data[fields]
}
)
)
).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')

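The rewritten `makeFunction` awaits the command once, splits stdout into lines and hands each line to parse-pairs, whose `keyTransform` strips the `LVM2_` prefix added by `--nameprefixes` and lowercases the field names. A sketch of what one line might parse into (the sample output line is an assumption, not taken from the diff):

import { createParser } from 'parse-pairs'

const parse = createParser({
  // 'LVM2_PV_NAME' -> 'pv_name'
  keyTransform: key => key.slice(5).toLowerCase(),
})

// Hypothetical line from `pvs --noheading --nosuffix --nameprefixes --units b`:
const line = "LVM2_PV_NAME='/dev/sdb' LVM2_PV_SIZE='107374182400'"

console.log(parse(line))
// { pv_name: '/dev/sdb', pv_size: '107374182400' }

// When a single field name is passed, only that value is returned:
const fields = 'pv_size'
console.log(parse(line)[fields]) // '107374182400'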
View File

@@ -16,6 +16,11 @@ export default {
key: {
type: 'string',
},
type: {
default: 'call',
enum: ['backup', 'call'],
},
data: {},
},
required: ['event', 'userId', 'jobId', 'key'],
required: ['event', 'userId', 'jobId'],
}

View File

@@ -0,0 +1,18 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.end'],
},
taskId: {
type: 'string',
description: 'identifier of this task',
},
status: {
enum: ['canceled', 'failure', 'success'],
},
result: {},
},
required: ['event', 'taskId', 'status'],
}

View File

@@ -0,0 +1,15 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.start'],
},
parentId: {
type: 'string',
description: 'identifier of the parent task or job',
},
data: {},
},
required: ['event'],
}

View File

@@ -1,28 +0,0 @@
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
export { streamToNewBuffer as default }

View File

@@ -55,10 +55,6 @@ export const asyncMap = (collection, iteratee) => {
// -------------------------------------------------------------------
export streamToBuffer from './stream-to-new-buffer'
// -------------------------------------------------------------------
export function camelToSnakeCase (string) {
return string.replace(
/([a-z0-9])([A-Z])/g,

View File

@@ -1,72 +0,0 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'
async function testVhdMerge () {
console.log('before merge')
const moOfRandom = 4
await execa('bash', [
'-c',
`head -c ${moOfRandom}M < /dev/urandom >randomfile`,
])
await execa('bash', [
'-c',
`head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
])
await execa('qemu-img', [
'convert',
'-f',
'raw',
'-Ovpc',
'randomfile',
'randomfile.vhd',
])
await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
// await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
const childVhd = new Vhd(handler, 'randomfile.vhd')
console.log('changing type')
await childVhd.readHeaderAndFooter()
console.log('child vhd', childVhd.footer.currentSize, originalSize)
await childVhd.readBlockTable()
childVhd.footer.diskType = 4 // Delta backup.
await childVhd.writeFooter()
console.log('chained')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
console.log('merged')
const parentVhd = new Vhd(handler, 'empty.vhd')
await parentVhd.readHeaderAndFooter()
console.log('parent vhd', parentVhd.footer.currentSize)
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-Oraw',
'empty.vhd',
'recovered',
])
await execa('truncate', ['-s', originalSize, 'recovered'])
console.log('ls', (await execa('ls', ['-lt'])).stdout)
console.log(
'diff',
(await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
)
/* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
await vhd.ensureBatSize(300)
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
*/
console.log(await handler.list())
console.log('lol')
}
export { testVhdMerge as default }

View File

@@ -232,13 +232,11 @@ const TRANSFORMS = {
}
const { major, minor } = guestMetrics.PV_drivers_version
const [hostMajor, hostMinor] = (
obj.$resident_on || obj.$pool.$master
).software_version.product_version.split('.')
return major >= hostMajor && minor >= hostMinor
? 'up to date'
: 'out of date'
return {
major,
minor,
}
})()
let resourceSet = otherConfig['xo:resource_set']
@@ -294,8 +292,7 @@ const TRANSFORMS = {
}
})(),
// TODO: there is two possible value: "best-effort" and "restart"
high_availability: Boolean(obj.ha_restart_priority),
high_availability: obj.ha_restart_priority,
memory: (function () {
const dynamicMin = +obj.memory_dynamic_min
@@ -329,6 +326,7 @@ const TRANSFORMS = {
other: otherConfig,
os_version: (guestMetrics && guestMetrics.os_version) || null,
power_state: obj.power_state,
hasVendorDevice: obj.has_vendor_device,
resourceSet,
snapshots: link(obj, 'snapshots'),
startTime: metrics && toTimestamp(metrics.start_time),
@@ -512,9 +510,7 @@ const TRANSFORMS = {
vdi.type += '-snapshot'
vdi.snapshot_time = toTimestamp(obj.snapshot_time)
vdi.$snapshot_of = link(obj, 'snapshot_of')
}
if (!obj.managed) {
} else if (!obj.managed) {
vdi.type += '-unmanaged'
}

View File

@@ -1,10 +1,32 @@
import endsWith from 'lodash/endsWith'
import JSON5 from 'json5'
import limitConcurrency from 'limit-concurrency-decorator'
import { BaseError } from 'make-error'
import {
endsWith,
findKey,
forEach,
get,
identity,
map,
mapValues,
mean,
sum,
uniq,
zipWith,
} from 'lodash'
import { parseDateTime } from './xapi'
export class FaultyGranularity extends BaseError {}
// -------------------------------------------------------------------
// according to https://xapi-project.github.io/xen-api/metrics.html
// The values are stored at intervals of:
// - 5 seconds for the past 10 minutes
// - one minute for the past 2 hours
// - one hour for the past week
// - one day for the past year
const RRD_STEP_SECONDS = 5
const RRD_STEP_MINUTES = 60
const RRD_STEP_HOURS = 3600
@@ -17,6 +39,7 @@ const RRD_STEP_FROM_STRING = {
days: RRD_STEP_DAYS,
}
// points = intervalInSeconds / step
const RRD_POINTS_PER_STEP = {
[RRD_STEP_SECONDS]: 120,
[RRD_STEP_MINUTES]: 120,
@@ -24,16 +47,6 @@ const RRD_POINTS_PER_STEP = {
[RRD_STEP_DAYS]: 366,
}
export class XapiStatsError extends BaseError {}
export class UnknownLegendFormat extends XapiStatsError {
constructor (line) {
super('Unknown legend line: ' + line)
}
}
export class FaultyGranularity extends XapiStatsError {}
// -------------------------------------------------------------------
// Utils
// -------------------------------------------------------------------
@@ -47,353 +60,185 @@ function convertNanToNull (value) {
return isNaN(value) ? null : value
}
async function getServerTimestamp (xapi, host) {
const serverLocalTime = await xapi.call('host.get_servertime', host.$ref)
return Math.floor(parseDateTime(serverLocalTime).getTime() / 1000)
async function getServerTimestamp (xapi, hostRef) {
const serverLocalTime = await xapi.call('host.get_servertime', hostRef)
return Math.floor(parseDateTime(serverLocalTime).getTime() / 1e3)
}
// -------------------------------------------------------------------
// Stats
// -------------------------------------------------------------------
function getNewHostStats () {
return {
cpus: [],
pifs: {
rx: [],
tx: [],
},
load: [],
memory: [],
memoryFree: [],
memoryUsed: [],
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
map(dataRow, ({ values }) =>
transformValue(convertNanToNull(values[legendIndex]))
)
const combineStats = (stats, path, combineValues) =>
zipWith(...map(stats, path), (...values) => combineValues(values))
// It browses the object in depth and initialises its properties
// The targetPath can be a string or an array containing the depth
// targetPath: [a, b, c] => a.b.c
const getValuesFromDepth = (obj, targetPath) => {
if (typeof targetPath === 'string') {
return (obj[targetPath] = [])
}
}
function getNewVmStats () {
return {
cpus: [],
vifs: {
rx: [],
tx: [],
},
xvds: {
r: {},
w: {},
},
memory: [],
memoryFree: [],
memoryUsed: [],
}
}
// -------------------------------------------------------------------
// Stats legends
// -------------------------------------------------------------------
function getNewHostLegends () {
return {
cpus: [],
pifs: {
rx: [],
tx: [],
},
load: null,
memoryFree: null,
memory: null,
}
}
function getNewVmLegends () {
return {
cpus: [],
vifs: {
rx: [],
tx: [],
},
xvds: {
r: [],
w: [],
},
memoryFree: null,
memory: null,
}
}
// Compute one legend line for one host
function parseOneHostLegend (hostLegend, type, index) {
let resReg
if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
hostLegend.cpus[resReg[1]] = index
} else if ((resReg = /^pif_eth([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
if (resReg[2] === 'rx') {
hostLegend.pifs.rx[resReg[1]] = index
} else {
hostLegend.pifs.tx[resReg[1]] = index
forEach(targetPath, (path, key) => {
if (obj[path] === undefined) {
obj = obj[path] = targetPath.length - 1 === key ? [] : {}
return
}
} else if (type === 'loadavg') {
hostLegend.load = index
} else if (type === 'memory_free_kib') {
hostLegend.memoryFree = index
} else if (type === 'memory_total_kib') {
hostLegend.memory = index
}
obj = obj[path]
})
return obj
}
// Compute one legend line for one vm
function parseOneVmLegend (vmLegend, type, index) {
let resReg
const testMetric = (test, type) =>
typeof test === 'string'
? test === type
: typeof test === 'function' ? test(type) : test.exec(type)
if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
vmLegend.cpus[resReg[1]] = index
} else if ((resReg = /^vif_([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
if (resReg[2] === 'rx') {
vmLegend.vifs.rx[resReg[1]] = index
} else {
vmLegend.vifs.tx[resReg[1]] = index
}
} else if ((resReg = /^vbd_xvd(.)_(read|write)$/.exec(type))) {
if (resReg[2] === 'read') {
vmLegend.xvds.r[resReg[1]] = index
} else {
vmLegend.xvds.w[resReg[1]] = index
}
} else if (type === 'memory_internal_free') {
vmLegend.memoryFree = index
} else if (endsWith(type, 'memory')) {
vmLegend.memory = index
}
}
const findMetric = (metrics, metricType) => {
let testResult
let metric
// Compute Stats Legends for host and vms from RRD update
function parseLegends (json) {
const hostLegends = getNewHostLegends()
const vmsLegends = {}
forEach(metrics, (current, key) => {
if (current.test === undefined) {
const newValues = findMetric(current, metricType)
json.meta.legend.forEach((value, index) => {
const parsedLine = /^AVERAGE:(host|vm):(.+):(.+)$/.exec(value)
if (parsedLine === null) {
throw new UnknownLegendFormat(value)
}
const [, name, uuid, type] = parsedLine
if (name !== 'vm') {
parseOneHostLegend(hostLegends, type, index)
} else {
if (vmsLegends[uuid] === undefined) {
vmsLegends[uuid] = getNewVmLegends()
metric = newValues.metric
if (metric !== undefined) {
testResult = newValues.testResult
return false
}
parseOneVmLegend(vmsLegends[uuid], type, index)
} else if ((testResult = testMetric(current.test, metricType))) {
metric = current
return false
}
})
return [hostLegends, vmsLegends]
return { metric, testResult }
}
// -------------------------------------------------------------------
// The metrics:
// test: can be a function, regexp or string, default to: currentKey
// getPath: default to: () => currentKey
// transformValue: default to: identity
const STATS = {
host: {
load: {
test: 'loadavg',
},
memoryFree: {
test: 'memory_free_kib',
transformValue: value => value * 1024,
},
memory: {
test: 'memory_total_kib',
transformValue: value => value * 1024,
},
cpus: {
test: /^cpu(\d+)$/,
getPath: matches => ['cpus', matches[1]],
transformValue: value => value * 1e2,
},
pifs: {
rx: {
test: /^pif_eth(\d+)_rx$/,
getPath: matches => ['pifs', 'rx', matches[1]],
},
tx: {
test: /^pif_eth(\d+)_tx$/,
getPath: matches => ['pifs', 'tx', matches[1]],
},
},
iops: {
r: {
test: /^iops_read_(\w+)$/,
getPath: matches => ['iops', 'r', matches[1]],
},
w: {
test: /^iops_write_(\w+)$/,
getPath: matches => ['iops', 'w', matches[1]],
},
},
ioThroughput: {
r: {
test: /^io_throughput_read_(\w+)$/,
getPath: matches => ['ioThroughput', 'r', matches[1]],
transformValue: value => value * 2 ** 20,
},
w: {
test: /^io_throughput_write_(\w+)$/,
getPath: matches => ['ioThroughput', 'w', matches[1]],
transformValue: value => value * 2 ** 20,
},
},
latency: {
r: {
test: /^read_latency_(\w+)$/,
getPath: matches => ['latency', 'r', matches[1]],
transformValue: value => value / 1e3,
},
w: {
test: /^write_latency_(\w+)$/,
getPath: matches => ['latency', 'w', matches[1]],
transformValue: value => value / 1e3,
},
},
iowait: {
test: /^iowait_(\w+)$/,
getPath: matches => ['iowait', matches[1]],
transformValue: value => value * 1e2,
},
},
vm: {
memoryFree: {
test: 'memory_internal_free',
transformValue: value => value * 1024,
},
memory: {
test: metricType => endsWith(metricType, 'memory'),
},
cpus: {
test: /^cpu(\d+)$/,
getPath: matches => ['cpus', matches[1]],
transformValue: value => value * 1e2,
},
vifs: {
rx: {
test: /^vif_(\d+)_rx$/,
getPath: matches => ['vifs', 'rx', matches[1]],
},
tx: {
test: /^vif_(\d+)_tx$/,
getPath: matches => ['vifs', 'tx', matches[1]],
},
},
xvds: {
r: {
test: /^vbd_xvd(.)_read$/,
getPath: matches => ['xvds', 'r', matches[1]],
},
w: {
test: /^vbd_xvd(.)_write$/,
getPath: matches => ['xvds', 'w', matches[1]],
},
},
},
}
// -------------------------------------------------------------------
export default class XapiStats {
constructor () {
this._vms = {}
this._hosts = {}
this._statsByObject = {}
}
// -------------------------------------------------------------------
// Remove stats (Helper)
// -------------------------------------------------------------------
_removeOlderStats (source, dest, pointsPerStep) {
for (const key in source) {
if (key === 'cpus') {
for (const cpuIndex in source.cpus) {
dest.cpus[cpuIndex].splice(
0,
dest.cpus[cpuIndex].length - pointsPerStep
)
}
// If the number of cpus has been decreased, remove !
let offset
if ((offset = dest.cpus.length - source.cpus.length) > 0) {
dest.cpus.splice(-offset)
}
} else if (endsWith(key, 'ifs')) {
// For each pif or vif
for (const ifType in source[key]) {
for (const pifIndex in source[key][ifType]) {
dest[key][ifType][pifIndex].splice(
0,
dest[key][ifType][pifIndex].length - pointsPerStep
)
}
// If the number of pifs has been decreased, remove !
let offset
if (
(offset = dest[key][ifType].length - source[key][ifType].length) > 0
) {
dest[key][ifType].splice(-offset)
}
}
} else if (key === 'xvds') {
for (const xvdType in source.xvds) {
for (const xvdLetter in source.xvds[xvdType]) {
dest.xvds[xvdType][xvdLetter].splice(
0,
dest.xvds[xvdType][xvdLetter].length - pointsPerStep
)
}
// If the number of xvds has decreased, remove the extra entries
// FIXME
}
} else if (key === 'load') {
dest.load.splice(0, dest[key].length - pointsPerStep)
} else if (key === 'memory') {
// Load, memory, memoryFree, memoryUsed
const length = dest.memory.length - pointsPerStep
dest.memory.splice(0, length)
dest.memoryFree.splice(0, length)
dest.memoryUsed.splice(0, length)
}
}
}
// -------------------------------------------------------------------
// HOST: Computation and stats update
// -------------------------------------------------------------------
// Compute one stats row for one host
_parseRowHostStats (hostLegends, hostStats, values) {
// Cpus
hostLegends.cpus.forEach((cpuIndex, index) => {
if (hostStats.cpus[index] === undefined) {
hostStats.cpus[index] = []
}
hostStats.cpus[index].push(values[cpuIndex] * 100)
})
// Pifs
for (const pifType in hostLegends.pifs) {
hostLegends.pifs[pifType].forEach((pifIndex, index) => {
if (hostStats.pifs[pifType][index] === undefined) {
hostStats.pifs[pifType][index] = []
}
hostStats.pifs[pifType][index].push(convertNanToNull(values[pifIndex]))
})
}
// Load
hostStats.load.push(convertNanToNull(values[hostLegends.load]))
// Memory.
// WARNING! memory/memoryFree are in kB.
const memory = values[hostLegends.memory] * 1024
const memoryFree = values[hostLegends.memoryFree] * 1024
hostStats.memory.push(memory)
if (hostLegends.memoryFree !== undefined) {
hostStats.memoryFree.push(memoryFree)
hostStats.memoryUsed.push(memory - memoryFree)
}
}
// Compute stats for host from RRD update
_parseHostStats (json, hostname, hostLegends, step) {
const host = this._hosts[hostname][step]
if (host.stats === undefined) {
host.stats = getNewHostStats()
}
for (const row of json.data) {
this._parseRowHostStats(hostLegends, host.stats, row.values)
}
}
// -------------------------------------------------------------------
// VM: Computation and stats update
// -------------------------------------------------------------------
// Compute stats for vms from RRD update
_parseRowVmStats (vmLegends, vmStats, values) {
// Cpus
vmLegends.cpus.forEach((cpuIndex, index) => {
if (vmStats.cpus[index] === undefined) {
vmStats.cpus[index] = []
}
vmStats.cpus[index].push(values[cpuIndex] * 100)
})
// Vifs
for (const vifType in vmLegends.vifs) {
vmLegends.vifs[vifType].forEach((vifIndex, index) => {
if (vmStats.vifs[vifType][index] === undefined) {
vmStats.vifs[vifType][index] = []
}
vmStats.vifs[vifType][index].push(convertNanToNull(values[vifIndex]))
})
}
// Xvds
for (const xvdType in vmLegends.xvds) {
for (const index in vmLegends.xvds[xvdType]) {
if (vmStats.xvds[xvdType][index] === undefined) {
vmStats.xvds[xvdType][index] = []
}
vmStats.xvds[xvdType][index].push(
convertNanToNull(values[vmLegends.xvds[xvdType][index]])
)
}
}
// Memory
// WARNING! memoryFree is in kB, memory is in bytes
const memory = values[vmLegends.memory]
const memoryFree = values[vmLegends.memoryFree] * 1024
vmStats.memory.push(memory)
if (vmLegends.memoryFree !== undefined) {
vmStats.memoryFree.push(memoryFree)
vmStats.memoryUsed.push(memory - memoryFree)
}
}
// Compute stats for vms
_parseVmsStats (json, hostname, vmsLegends, step) {
if (this._vms[hostname][step] === undefined) {
this._vms[hostname][step] = {}
}
const vms = this._vms[hostname][step]
for (const uuid in vmsLegends) {
if (vms[uuid] === undefined) {
vms[uuid] = getNewVmStats()
}
}
for (const row of json.data) {
for (const uuid in vmsLegends) {
this._parseRowVmStats(vmsLegends[uuid], vms[uuid], row.values)
}
}
}
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// Execute one HTTP request on a XenServer to get stats
// Returns stats (JSON format) or throws a got exception
@limitConcurrency(3)
@@ -411,40 +256,46 @@ export default class XapiStats {
.then(response => response.readAll().then(JSON5.parse))
}
async _getLastTimestamp (xapi, host, step) {
if (this._hosts[host.address][step] === undefined) {
const serverTimeStamp = await getServerTimestamp(xapi, host)
return serverTimeStamp - step * RRD_POINTS_PER_STEP[step] + step
}
async _getNextTimestamp (xapi, host, step) {
const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)
const maxDuration = step * RRD_POINTS_PER_STEP[step]
const lastTimestamp = get(this._statsByObject, [
host.uuid,
step,
'endTimestamp',
])
return this._hosts[host.address][step].endTimestamp
if (
lastTimestamp === undefined ||
currentTimeStamp - lastTimestamp + step > maxDuration
) {
return currentTimeStamp - maxDuration + step
}
return lastTimestamp
}
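// Worked example (numbers are hypothetical; RRD_POINTS_PER_STEP is defined
// earlier in this file): with step = 5 and RRD_POINTS_PER_STEP[5] = 120,
// maxDuration = 600 s. On a first call (no stored endTimestamp) the RRD is
// queried from currentTimeStamp - 600 + 5, i.e. the full retention window;
// afterwards only the points newer than the stored endTimestamp are requested.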
_getPoints (hostname, step, vmId) {
const hostStats = this._hosts[hostname][step]
_getStats (hostUuid, step, vmUuid) {
const hostStats = this._statsByObject[hostUuid][step]
// Return host points
if (vmId === undefined) {
// Return host stats
if (vmUuid === undefined) {
return {
interval: step,
...hostStats,
}
}
const vmsStats = this._vms[hostname][step]
// Return vm points
// Return vm stats
return {
interval: step,
endTimestamp: hostStats.endTimestamp,
stats: (vmsStats && vmsStats[vmId]) || getNewVmStats(),
...this._statsByObject[vmUuid][step],
}
}
async _getAndUpdatePoints (xapi, host, vmId, granularity) {
// Get granularity to use
async _getAndUpdateStats (xapi, { host, vmUuid, granularity }) {
const step =
granularity === undefined || granularity === 0
granularity === undefined
? RRD_STEP_SECONDS
: RRD_STEP_FROM_STRING[granularity]
@@ -455,59 +306,21 @@ export default class XapiStats {
}
// Limit the number of http requests
const hostname = host.address
if (this._hosts[hostname] === undefined) {
this._hosts[hostname] = {}
this._vms[hostname] = {}
}
const hostUuid = host.uuid
if (
this._hosts[hostname][step] !== undefined &&
this._hosts[hostname][step].localTimestamp + step > getCurrentTimestamp()
get(this._statsByObject, [hostUuid, step, 'localTimestamp']) + step >
getCurrentTimestamp()
) {
return this._getPoints(hostname, step, vmId)
return this._getStats(hostUuid, step, vmUuid)
}
// Check that we are in the right interval; use this._hosts[hostname][step].localTimestamp
// to avoid bad requests
// TODO
// Get json
const timestamp = await this._getLastTimestamp(xapi, host, step)
let json = await this._getJson(xapi, host, timestamp)
// Check that the granularity matches 'step'
// If it does not, retry the request with the timestamp from the JSON response
const timestamp = await this._getNextTimestamp(xapi, host, step)
const json = await this._getJson(xapi, host, timestamp)
if (json.meta.step !== step) {
console.log(
`RRD call: Expected step: ${step}, received step: ${
json.meta.step
}. Retry with other timestamp`
throw new FaultyGranularity(
`Unable to get the true granularity: ${json.meta.step}`
)
const serverTimestamp = await getServerTimestamp(xapi, host)
// Approximately half the points are requested
// FIXME: Not the best solution
json = await this._getJson(
xapi,
host,
serverTimestamp - step * (RRD_POINTS_PER_STEP[step] / 2) + step
)
if (json.meta.step !== step) {
throw new FaultyGranularity(
`Unable to get the true granularity: ${json.meta.step}`
)
}
}
// Make new backup slot if necessary
if (this._hosts[hostname][step] === undefined) {
this._hosts[hostname][step] = {
endTimestamp: 0,
localTimestamp: 0,
}
}
// There is data
@@ -516,70 +329,133 @@ export default class XapiStats {
// timestamp of the oldest data value
// So, we use the timestamp of the oldest data value !
const startTimestamp = json.data[json.meta.rows - 1].t
const endTimestamp = get(this._statsByObject, [
hostUuid,
step,
'endTimestamp',
])
// Remove useless data and reorder
// Note: Older values are at end of json.data.row
const parseOffset =
(this._hosts[hostname][step].endTimestamp - startTimestamp + step) /
step
json.data.splice(json.data.length - parseOffset)
json.data.reverse()
const statsOffset = endTimestamp - startTimestamp + step
if (endTimestamp !== undefined && statsOffset > 0) {
const parseOffset = statsOffset / step
// Remove useless data
// Note: Older values are at end of json.data.row
json.data.splice(json.data.length - parseOffset)
}
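// Worked example (hypothetical values): with step = 60, a stored
// endTimestamp = 1000 and a response whose oldest row has t = 940,
// statsOffset = 1000 - 940 + 60 = 120 and parseOffset = 2, so the two
// oldest rows (already stored by the previous request) are dropped.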
// There is useful data
if (json.data.length > 0) {
const [hostLegends, vmsLegends] = parseLegends(json)
// Compute and update host/vms stats
this._parseVmsStats(json, hostname, vmsLegends, step)
this._parseHostStats(json, hostname, hostLegends, step)
// Remove older stats
this._removeOlderStats(
hostLegends,
this._hosts[hostname][step].stats,
RRD_POINTS_PER_STEP[step]
)
for (const uuid in vmsLegends) {
this._removeOlderStats(
vmsLegends[uuid],
this._vms[hostname][step][uuid],
RRD_POINTS_PER_STEP[step]
// reorder data
json.data.reverse()
forEach(json.meta.legend, (legend, index) => {
const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
legend
)
}
const metrics = STATS[type]
if (metrics === undefined) {
return
}
const { metric, testResult } = findMetric(metrics, metricType)
if (metric === undefined) {
return
}
const path =
metric.getPath !== undefined
? metric.getPath(testResult)
: [findKey(metrics, metric)]
const metricValues = getValuesFromDepth(this._statsByObject, [
uuid,
step,
'stats',
...path,
])
metricValues.push(
...computeValues(json.data, index, metric.transformValue)
)
// remove older Values
metricValues.splice(
0,
metricValues.length - RRD_POINTS_PER_STEP[step]
)
})
}
}
// Update timestamp
this._hosts[hostname][step].endTimestamp = json.meta.end
this._hosts[hostname][step].localTimestamp = getCurrentTimestamp()
return this._getPoints(hostname, step, vmId)
const hostStats = this._statsByObject[hostUuid][step]
hostStats.endTimestamp = json.meta.end
hostStats.localTimestamp = getCurrentTimestamp()
return this._getStats(hostUuid, step, vmUuid)
}
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// Warning: these functions return a reference to internal data,
// so the data can be changed by a parallel call to these functions.
// The returned data must not be modified.
// Return host stats
async getHostPoints (xapi, hostId, granularity) {
const host = xapi.getObject(hostId)
return this._getAndUpdatePoints(xapi, host, undefined, granularity)
getHostStats (xapi, hostId, granularity) {
return this._getAndUpdateStats(xapi, {
host: xapi.getObject(hostId),
granularity,
})
}
// Return vms stats
async getVmPoints (xapi, vmId, granularity) {
getVmStats (xapi, vmId, granularity) {
const vm = xapi.getObject(vmId)
const host = vm.$resident_on
if (!host) {
throw new Error(`VM ${vmId} is halted or host could not be found.`)
}
return this._getAndUpdatePoints(xapi, host, vm.uuid, granularity)
return this._getAndUpdateStats(xapi, {
host,
vmUuid: vm.uuid,
granularity,
})
}
async getSrStats (xapi, srId, granularity) {
const sr = xapi.getObject(srId)
const hostsStats = {}
await Promise.all(
map(uniq(map(sr.$PBDs, 'host')), hostId =>
this.getHostStats(xapi, hostId, granularity).then(stats => {
hostsStats[xapi.getObject(hostId).name_label] = stats
})
)
)
const srShortUUID = sr.uuid.slice(0, 8)
return {
interval: hostsStats[Object.keys(hostsStats)[0]].interval,
endTimestamp: Math.max(...map(hostsStats, 'endTimestamp')),
localTimestamp: Math.min(...map(hostsStats, 'localTimestamp')),
stats: {
iops: {
r: combineStats(hostsStats, `stats.iops.r[${srShortUUID}]`, sum),
w: combineStats(hostsStats, `stats.iops.w[${srShortUUID}]`, sum),
},
ioThroughput: {
r: combineStats(
hostsStats,
`stats.ioThroughput.r[${srShortUUID}]`,
sum
),
w: combineStats(
hostsStats,
`stats.ioThroughput.w[${srShortUUID}]`,
sum
),
},
latency: {
r: combineStats(hostsStats, `stats.latency.r[${srShortUUID}]`, mean),
w: combineStats(hostsStats, `stats.latency.w[${srShortUUID}]`, mean),
},
iowait: mapValues(hostsStats, `stats.iowait[${srShortUUID}]`),
},
}
}
}
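// Illustrative sketch (assumption, not part of this diff): combineStats()
// used by getSrStats() above is expected to read the same lodash-style path
// from every host's stats and merge the series point by point with the given
// function (sum for IOPS/throughput, mean for latency). Roughly:
//
//   const combineStatsSketch = (hostsStats, path, combine) =>
//     zipWith(...map(hostsStats, stats => get(stats, path) || []), (...values) =>
//       combine(values.filter(value => value != null))
//     )
//
// with get/map/zipWith coming from lodash.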


@@ -266,8 +266,8 @@ export default class Xapi extends XapiBase {
return value === null
? removal
: removal
::ignoreErrors()
.then(() => this.call(add, ref, name, prepareXapiParam(value)))
::ignoreErrors()
.then(() => this.call(add, ref, name, prepareXapiParam(value)))
}
})
)
@@ -517,9 +517,9 @@ export default class Xapi extends XapiBase {
const onVmCreation =
nameLabel !== undefined
? vm =>
targetXapi._setObjectProperties(vm, {
nameLabel,
})
targetXapi._setObjectProperties(vm, {
nameLabel,
})
: null
const vm = await targetXapi._getOrWaitObject(
@@ -633,7 +633,12 @@ export default class Xapi extends XapiBase {
)
}
async _deleteVm (vm, deleteDisks = true, force = false) {
async _deleteVm (
vm,
deleteDisks = true,
force = false,
forceDeleteDefaultTemplate = false
) {
debug(`Deleting VM ${vm.name_label}`)
const { $ref } = vm
@@ -654,6 +659,10 @@ export default class Xapi extends XapiBase {
vm = await this.barrier('VM', $ref)
return Promise.all([
forceDeleteDefaultTemplate &&
this._updateObjectMapProperty(vm, 'other_config', {
default_template: null,
}),
this.call('VM.destroy', $ref),
asyncMap(vm.$snapshots, snapshot =>
@@ -693,8 +702,13 @@ export default class Xapi extends XapiBase {
])
}
async deleteVm (vmId, deleteDisks, force) {
return /* await */ this._deleteVm(this.getObject(vmId), deleteDisks, force)
async deleteVm (vmId, deleteDisks, force, forceDeleteDefaultTemplate) {
return /* await */ this._deleteVm(
this.getObject(vmId),
deleteDisks,
force,
forceDeleteDefaultTemplate
)
}
getVmConsole (vmId) {
@@ -860,29 +874,30 @@ export default class Xapi extends XapiBase {
// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]
vdis[vdiRef] =
baseVdi && !disableBaseTags
? {
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: baseVdi.uuid,
},
$SR$uuid: vdi.$SR.uuid,
}
: {
...vdi,
$SR$uuid: vdi.$SR.uuid,
}
vdis[vdiRef] = {
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]:
baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
},
$SR$uuid: vdi.$SR.uuid,
}
streams[`${vdiRef}.vhd`] = () =>
this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
})
const vifs = {}
forEach(vm.$VIFs, vif => {
const network = vif.$network
vifs[vif.$ref] = {
...vif,
$network$uuid: vif.$network.uuid,
$network$uuid: network.uuid,
$network$name_label: network.name_label,
// https://github.com/babel/babel-eslint/issues/595
// eslint-disable-next-line no-undef
$network$VLAN: network.$PIFs[0]?.VLAN,
}
})
@@ -898,9 +913,9 @@ export default class Xapi extends XapiBase {
other_config:
baseVm && !disableBaseTags
? {
...vm.other_config,
[TAG_BASE_DELTA]: baseVm.uuid,
}
...vm.other_config,
[TAG_BASE_DELTA]: baseVm.uuid,
}
: omit(vm.other_config, TAG_BASE_DELTA),
},
},
@@ -979,11 +994,29 @@ export default class Xapi extends XapiBase {
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
// 3. Create VDIs.
const newVdis = await map(delta.vdis, async vdi => {
// 3. Create VDIs & VBDs.
const vbds = groupBy(delta.vbds, 'VDI')
const newVdis = await map(delta.vdis, async (vdi, vdiId) => {
let newVdi
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (!remoteBaseVdiUuid) {
const newVdi = await this.createVdi({
if (remoteBaseVdiUuid) {
const baseVdi = find(
baseVdis,
vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => this._deleteVdi(newVdi))
await this._updateObjectMapProperty(newVdi, 'other_config', {
[TAG_COPY_SRC]: vdi.uuid,
})
} else {
newVdi = await this.createVdi({
...vdi,
other_config: {
...vdi.other_config,
@@ -993,47 +1026,40 @@ export default class Xapi extends XapiBase {
sr: mapVdisSrs[vdi.uuid] || srId,
})
$defer.onFailure(() => this._deleteVdi(newVdi))
return newVdi
}
const baseVdi = find(
baseVdis,
vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
await asyncMap(vbds[vdiId], vbd =>
this.createVbd({
...vbd,
vdi: newVdi,
vm,
})
)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
const newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => this._deleteVdi(newVdi))
await this._updateObjectMapProperty(newVdi, 'other_config', {
[TAG_COPY_SRC]: vdi.uuid,
})
return newVdi
})::pAll()
const networksOnPoolMasterByDevice = {}
const networksByNameLabelByVlan = {}
let defaultNetwork
forEach(this.pool.$master.$PIFs, pif => {
defaultNetwork = networksOnPoolMasterByDevice[pif.device] = pif.$network
forEach(this.objects.all, object => {
if (object.$type === 'network') {
const pif = object.$PIFs[0]
if (pif === undefined) {
// ignore network
return
}
const vlan = pif.VLAN
const networksByNameLabel =
networksByNameLabelByVlan[vlan] ||
(networksByNameLabelByVlan[vlan] = {})
defaultNetwork = networksByNameLabel[object.name_label] = object
}
})
const { streams } = delta
let transferSize = 0
await Promise.all([
// Create VBDs.
asyncMap(delta.vbds, vbd =>
this.createVbd({
...vbd,
vdi: newVdis[vbd.VDI],
vm,
})
),
// Import VDI contents.
asyncMap(newVdis, async (vdi, id) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
@@ -1055,10 +1081,21 @@ export default class Xapi extends XapiBase {
// Create VIFs.
asyncMap(delta.vifs, vif => {
const network =
(vif.$network$uuid && this.getObject(vif.$network$uuid, null)) ||
networksOnPoolMasterByDevice[vif.device] ||
defaultNetwork
let network =
vif.$network$uuid && this.getObject(vif.$network$uuid, undefined)
if (network === undefined) {
const { $network$VLAN: vlan = -1 } = vif
const networksByNameLabel = networksByNameLabelByVlan[vlan]
if (networksByNameLabel !== undefined) {
network = networksByNameLabel[vif.$network$name_label]
if (network === undefined) {
network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
}
} else {
network = defaultNetwork
}
}
if (network) {
return this._createVif(vm, network, vif)
@@ -1277,7 +1314,7 @@ export default class Xapi extends XapiBase {
async _importOvaVm (
$defer,
stream,
{ descriptionLabel, disks, memory, nameLabel, networks, nCpus },
{ descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables },
sr
) {
// 1. Create VM.
@@ -1350,8 +1387,9 @@ export default class Xapi extends XapiBase {
return
}
const vhdStream = await vmdkToVhd(stream)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_RAW)
const table = tables[entry.name]
const vhdStream = await vmdkToVhd(stream, table)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
// See: https://github.com/mafintosh/tar-stream#extracting
// No import parallelization.
@@ -1997,7 +2035,9 @@ export default class Xapi extends XapiBase {
name_label: name,
name_description: description,
MTU: asInteger(mtu),
other_config: {},
// Set automatic to false so XenCenter does not get confused
// https://citrix.github.io/xenserver-sdk/#network
other_config: { automatic: 'false' },
})
$defer.onFailure(() => this.call('network.destroy', networkRef))
if (pifId) {


@@ -37,6 +37,8 @@ declare class XapiObject {
type Id = string | XapiObject
declare export class Vm extends XapiObject {
$snapshots: Vm[];
is_a_snapshot: boolean;
is_a_template: boolean;
name_label: string;
other_config: $Dict<string>;
snapshot_time: number;
@@ -56,7 +58,7 @@ declare export class Xapi {
_updateObjectMapProperty(
object: XapiObject,
property: string,
entries: $Dict<string>
entries: $Dict<null | string>
): Promise<void>;
_setObjectProperties(
object: XapiObject,


@@ -1,9 +1,9 @@
import deferrable from 'golike-defer'
import every from 'lodash/every'
import find from 'lodash/find'
import filter from 'lodash/filter'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
@@ -445,10 +445,10 @@ export default {
const installableByUuid =
host.license_params.sku_type !== 'free'
? await this._listMissingPoolPatchesOnHost(host)
: filter(await this._listMissingPoolPatchesOnHost(host), {
paid: false,
upgrade: false,
})
: pickBy(await this._listMissingPoolPatchesOnHost(host), {
paid: false,
upgrade: false,
})
// List of all installable patches sorted from the newest to the
// oldest.
@@ -488,7 +488,7 @@ export default {
patches =>
host.license_params.sku_type !== 'free'
? patches
: filter(patches, { paid: false, upgrade: false })
: pickBy(patches, { paid: false, upgrade: false })
)
}
})


@@ -310,11 +310,7 @@ export default {
highAvailability: {
set (ha, vm) {
return this.call(
'VM.set_ha_restart_priority',
vm.$ref,
ha ? 'restart' : ''
)
return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
},
},
@@ -384,6 +380,8 @@ export default {
tags: true,
hasVendorDevice: true,
vga: {
set (vga, vm) {
if (!includes(XEN_VGA_VALUES, vga)) {


@@ -154,7 +154,8 @@ export default class {
id: await generateToken(),
user_id: userId,
expiration:
Date.now() + (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
Date.now() +
(typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
})
await this._tokens.add(token)

File diff suppressed because it is too large


@@ -12,7 +12,6 @@ import {
endsWith,
filter,
find,
findIndex,
includes,
once,
range,
@@ -20,9 +19,13 @@ import {
startsWith,
trim,
} from 'lodash'
import {
chainVhd,
createSyntheticStream as createVhdReadStream,
mergeVhd,
} from 'vhd-lib'
import createSizeStream from '../size-stream'
import vhdMerge, { chainVhd } from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import { lvs, pvs } from '../lvm'
import {
@@ -551,7 +554,7 @@ export default class {
const backup = `${dir}/${backups[j]}`
try {
mergedDataSize += await vhdMerge(handler, parent, handler, backup)
mergedDataSize += await mergeVhd(handler, parent, handler, backup)
} catch (e) {
console.error('Unable to use vhd-util.', e)
throw e
@@ -566,33 +569,6 @@ export default class {
return mergedDataSize
}
async _listDeltaVdiDependencies (handler, filePath) {
const dir = dirname(filePath)
const filename = basename(filePath)
const backups = await this._listVdiBackups(handler, dir)
// Search file. (delta or full backup)
const i = findIndex(
backups,
backup => getVdiTimestamp(backup) === getVdiTimestamp(filename)
)
if (i === -1) {
throw new Error('VDI to import not found in this remote.')
}
// Search full backup.
let j
for (j = i; j >= 0 && isDeltaVdiBackup(backups[j]); j--);
if (j === -1) {
throw new Error(`Unable to found full vdi backup of: ${filePath}`)
}
return backups.slice(j, i + 1)
}
// -----------------------------------------------------------------
async _listDeltaVmBackups (handler, dir) {
@@ -840,17 +816,17 @@ export default class {
await Promise.all(
mapToArray(delta.vdis, async (vdi, id) => {
const vdisFolder = `${basePath}/${dirname(vdi.xoPath)}`
const backups = await this._listDeltaVdiDependencies(
handler,
`${basePath}/${vdi.xoPath}`
)
let path = `${basePath}/${vdi.xoPath}`
try {
await handler.getSize(path)
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
streams[`${id}.vhd`] = await Promise.all(
mapToArray(backups, async backup =>
handler.createReadStream(`${vdisFolder}/${backup}`)
)
)
path = path.replace(/_delta\.vhd$/, '_full.vhd')
}
streams[`${id}.vhd`] = await createVhdReadStream(handler, path)
})
)
@@ -1038,13 +1014,13 @@ export default class {
// VHD path may need to be fixed.
return endsWith(vhdPath, '_delta.vhd')
? pFromCallback(cb => stat(vhdPath, cb)).then(
() => vhdPath,
error => {
if (error && error.code === 'ENOENT') {
return `${vhdPath.slice(0, -10)}_full.vhd`
() => vhdPath,
error => {
if (error && error.code === 'ENOENT') {
return `${vhdPath.slice(0, -10)}_full.vhd`
}
}
}
)
)
: vhdPath
})
.then(vhdPath => execa('vhdimount', [vhdPath, mountDir]))


@@ -0,0 +1,332 @@
import defer from 'golike-defer'
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser as createPairsParser } from 'parse-pairs'
import { normalize } from 'path'
import { readdir, rmdir, stat } from 'fs-extra'
import { ZipFile } from 'yazl'
import { lvs, pvs } from '../lvm'
import { resolveSubpath, tmpDir } from '../utils'
const IGNORED_PARTITION_TYPES = {
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
0x05: true,
0x0f: true,
0x15: true,
0x5e: true,
0x5f: true,
0x85: true,
0x91: true,
0x9b: true,
0xc5: true,
0xcf: true,
0xd5: true,
0x82: true, // swap
}
const PARTITION_TYPE_NAMES = {
0x07: 'NTFS',
0x0c: 'FAT',
0x83: 'linux',
}
const RE_VHDI = /^vhdi(\d+)$/
const parsePartxLine = createPairsParser({
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
valueTransform: (value, key) =>
key === 'start' || key === 'size'
? +value
: key === 'type' ? PARTITION_TYPE_NAMES[+value] || value : value,
})
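// Example (hypothetical values) of what parsePartxLine does with one
// `partx --pairs` output line:
//
//   parsePartxLine('NR="1" START="2048" SIZE="98566144" NAME="" UUID="cafe-01" TYPE="0x83"')
//   // -> { nr: '1', start: 2048, size: 98566144, name: '', id: 'cafe-01', type: 'linux' }
//
// START and SIZE are coerced to numbers, UUID is renamed to id, and known MBR
// type codes are mapped to a readable name (0x83 -> 'linux').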
const listLvmLogicalVolumes = defer(
async ($defer, devicePath, partition, results = []) => {
const pv = await mountLvmPhysicalVolume(devicePath, partition)
$defer(pv.unmount)
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], pv.path)
const partitionId = partition !== undefined ? partition.id : ''
lvs.forEach((lv, i) => {
const name = lv.lv_name
if (name !== '') {
results.push({
id: `${partitionId}/${lv.vg_name}/${name}`,
name,
size: lv.lv_size,
})
}
})
return results
}
)
async function mountLvmPhysicalVolume (devicePath, partition) {
const args = []
if (partition !== undefined) {
args.push('-o', partition.start * 512)
}
args.push('--show', '-f', devicePath)
const path = (await execa.stdout('losetup', args)).trim()
await execa('pvscan', ['--cache', path])
return {
path,
unmount: async () => {
try {
const vgNames = await pvs('vg_name', path)
await execa('vgchange', ['-an', ...vgNames])
} finally {
await execa('losetup', ['-d', path])
}
},
}
}
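// Example: a partition starting at sector 2048 is exposed through losetup
// with a byte offset of 2048 * 512 = 1048576.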
const mountPartition = defer(async ($defer, devicePath, partition) => {
const options = ['loop', 'ro']
if (partition !== undefined) {
const { start } = partition
if (start !== undefined) {
options.push(`offset=${start * 512}`)
}
}
const path = await tmpDir()
$defer.onFailure(rmdir, path)
const mount = options =>
execa('mount', [
`--options=${options.join(',')}`,
`--source=${devicePath}`,
`--target=${path}`,
])
// The `norecovery` option is used for ext3/ext4/xfs; if the mount fails it
// might be another fs, so retry without it
try {
await mount([...options, 'norecovery'])
} catch (error) {
await mount(options)
}
const unmount = async () => {
await execa('umount', ['--lazy', path])
return rmdir(path)
}
$defer.onFailure(unmount)
return { path, unmount }
})
// - [x] list partitions
// - [x] list files in a partition
// - [x] list files in a bare partition
// - [x] list LVM partitions
//
// - [ ] partitions with unmount debounce
// - [ ] handle directory restore
// - [ ] handle multiple entries restore (both dirs and files)
// - [ ] by default use common path as root
// - [ ] handle LVM partitions on multiple disks
// - [ ] find mounted disks/partitions on start (in case of interruptions)
//
// - [ ] manual mount/unmount (of disk) for advance file restore
// - could it stay mounted during the backup process?
// - [ ] mountDisk (VHD)
// - [ ] unmountDisk (only for manual mount)
// - [ ] getMountedDisks
// - [ ] mountPartition (optional)
// - [ ] getMountedPartitions
// - [ ] unmountPartition
export default class BackupNgFileRestore {
constructor (app) {
this._app = app
this._mounts = { __proto__: null }
}
@defer
async fetchBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
paths
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer.onFailure(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer.onFailure(partition.unmount)
const zip = new ZipFile()
paths.forEach(file => {
zip.addFile(resolveSubpath(partition.path, file), normalize('./' + file))
})
zip.end()
return zip.outputStream.on('end', () =>
partition.unmount().then(disk.unmount)
)
}
@defer
async listBackupNgDiskPartitions ($defer, remoteId, diskId) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
return this._listPartitions(disk.path)
}
@defer
async listBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
path
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer(partition.unmount)
path = resolveSubpath(partition.path, path)
const entriesMap = {}
await Promise.all(
(await readdir(path)).map(async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
})
)
return entriesMap
}
async _findPartition (devicePath, partitionId) {
const partitions = await this._listPartitions(devicePath, false)
const partition = partitions.find(_ => _.id === partitionId)
if (partition === undefined) {
throw new Error(`partition ${partitionId} not found`)
}
return partition
}
async _listPartitions (devicePath, inspectLvmPv = true) {
const stdout = await execa.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
devicePath,
])
const promises = []
const partitions = []
splitLines(stdout).forEach(line => {
const partition = parsePartxLine(line)
let { type } = partition
if (type == null || (type = +type) in IGNORED_PARTITION_TYPES) {
return
}
if (inspectLvmPv && type === 0x8e) {
promises.push(listLvmLogicalVolumes(devicePath, partition, partitions))
return
}
partitions.push(partition)
})
await Promise.all(promises)
return partitions
}
@defer
async _mountDisk ($defer, remoteId, diskId) {
const handler = await this._app.getRemoteHandler(remoteId)
if (handler._getFilePath === undefined) {
throw new Error(`this remote is not supported`)
}
const diskPath = handler._getFilePath(diskId)
const mountDir = await tmpDir()
$defer.onFailure(rmdir, mountDir)
await execa('vhdimount', [diskPath, mountDir])
const unmount = async () => {
await execa('fusermount', ['-uz', mountDir])
return rmdir(mountDir)
}
$defer.onFailure(unmount)
let max = 0
let maxEntry
const entries = await readdir(mountDir)
entries.forEach(entry => {
const matches = RE_VHDI.exec(entry)
if (matches !== null) {
const value = +matches[1]
if (value > max) {
max = value
maxEntry = entry
}
}
})
if (max === 0) {
throw new Error('no disks found')
}
return {
path: `${mountDir}/${maxEntry}`,
unmount,
}
}
@defer
async _mountPartition ($defer, devicePath, partitionId) {
if (partitionId === undefined) {
return mountPartition(devicePath)
}
if (partitionId.includes('/')) {
const [pvId, vgName, lvName] = partitionId.split('/')
const lvmPartition =
pvId !== '' ? await this._findPartition(devicePath, pvId) : undefined
const pv = await mountLvmPhysicalVolume(devicePath, lvmPartition)
const unmountQueue = [pv.unmount]
const unmount = async () => {
let fn
while ((fn = unmountQueue.pop()) !== undefined) {
await fn()
}
}
$defer.onFailure(unmount)
await execa('vgchange', ['-ay', vgName])
unmountQueue.push(() => execa('vgchange', ['-an', vgName]))
const partition = await mountPartition(
(await lvs(['lv_name', 'lv_path'], vgName)).find(
_ => _.lv_name === lvName
).lv_path
)
unmountQueue.push(partition.unmount)
return { ...partition, unmount }
}
return mountPartition(
devicePath,
await this._findPartition(devicePath, partitionId)
)
}
}


@@ -209,28 +209,43 @@ export default class Jobs {
throw new Error(`job ${id} is already running`)
}
const executor = this._executors[job.type]
const { type } = job
const executor = this._executors[type]
if (executor === undefined) {
throw new Error(`cannot run job ${id}: no executor for type ${job.type}`)
throw new Error(`cannot run job ${id}: no executor for type ${type}`)
}
let data
if (type === 'backup') {
// $FlowFixMe only defined for BackupJob
const settings = job.settings['']
data = {
// $FlowFixMe only defined for BackupJob
mode: job.mode,
reportWhen: (settings && settings.reportWhen) || 'failure',
}
}
const logger = this._logger
const runJobId = logger.notice(`Starting execution of ${id}.`, {
data,
event: 'job.start',
userId: job.userId,
jobId: id,
// $FlowFixMe only defined for CallJob
key: job.key,
type,
})
runningJobs[id] = runJobId
let session
try {
const app = this._app
const session = app.createUserConnection()
session = app.createUserConnection()
session.set('user_id', job.userId)
const status = await executor({
await executor({
app,
cancelToken,
job,
@@ -244,8 +259,7 @@ export default class Jobs {
runJobId,
})
session.close()
app.emit('job:terminated', status)
app.emit('job:terminated', runJobId, job, schedule)
} catch (error) {
logger.error(`The execution of ${id} has failed.`, {
event: 'job.end',
@@ -255,6 +269,9 @@ export default class Jobs {
throw error
} finally {
delete runningJobs[id]
if (session !== undefined) {
session.close()
}
}
}


@@ -32,11 +32,11 @@ export default class Logs {
const onData =
keep !== 0
? () => {
if (--keep === 0) {
stream.on('data', deleteEntry)
stream.removeListener('data', onData)
if (--keep === 0) {
stream.on('data', deleteEntry)
stream.removeListener('data', onData)
}
}
}
: deleteEntry
stream.on('data', onData)
@@ -51,4 +51,22 @@ export default class Logs {
.getStore('logs')
.then(store => new LevelDbLogger(store, namespace))
}
async getLogs (namespace) {
const logger = await this.getLogger(namespace)
return new Promise((resolve, reject) => {
const logs = {}
logger
.createReadStream()
.on('data', data => {
logs[data.key] = data.value
})
.on('end', () => {
resolve(logs)
})
.on('error', reject)
})
}
}


@@ -1,7 +1,7 @@
import { getHandler } from '@xen-orchestra/fs'
import { noSuchObject } from 'xo-common/api-errors'
import { forEach, mapToArray } from '../utils'
import { getHandler } from '../remote-handlers'
import { Remotes } from '../models/remote'
// ===================================================================


@@ -15,7 +15,7 @@ export type Schedule = {|
jobId: string,
name: string,
timezone?: string,
userId: string
userId: string,
|}
const normalize = schedule => {
@@ -45,7 +45,7 @@ export default class Scheduling {
first: Function,
get: Function,
remove: Function,
update: Function
update: Function,
|}
_runs: { __proto__: null, [string]: () => void }


@@ -1,7 +1,7 @@
// @flow
import mergeVhd_ from '../../vhd-merge'
import { type Remote, getHandler } from '../../remote-handlers'
import { type Remote, getHandler } from '@xen-orchestra/fs'
import { mergeVhd as mergeVhd_ } from 'vhd-lib'
export function mergeVhd (
parentRemote: Remote,


@@ -392,14 +392,16 @@ export default class {
return servers
}
getXapiVmStats (vm, granularity) {
const xapi = this.getXapi(vm)
return this._stats.getVmPoints(xapi, vm._xapiId, granularity)
getXapiVmStats (vmId, granularity) {
return this._stats.getVmStats(this.getXapi(vmId), vmId, granularity)
}
getXapiHostStats (host, granularity) {
const xapi = this.getXapi(host)
return this._stats.getHostPoints(xapi, host._xapiId, granularity)
getXapiHostStats (hostId, granularity) {
return this._stats.getHostStats(this.getXapi(hostId), hostId, granularity)
}
getXapiSrStats (srId, granularity) {
return this._stats.getSrStats(this.getXapi(srId), srId, granularity)
}
async mergeXenPools (sourceId, targetId, force = false) {


@@ -140,7 +140,11 @@ export default class Xo extends EventEmitter {
}).then(
result => {
if (result != null) {
res.end(JSON.stringify(result))
if (typeof result.pipe === 'function') {
result.pipe(res)
} else {
res.end(JSON.stringify(result))
}
}
},
error => {


@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)


@@ -1,6 +1,6 @@
{
"name": "xo-vmdk-to-vhd",
"version": "0.0.12",
"version": "0.1.1",
"license": "AGPL-3.0",
"description": "JS lib streaming a vmdk file to a vhd",
"keywords": [
@@ -23,44 +23,33 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.18.0",
"@babel/runtime": "^7.0.0-beta.44",
"child-process-promise": "^2.0.3",
"deflate-js": "^0.2.3",
"fs-promise": "^2.0.0",
"pipette": "^0.9.3"
"pipette": "^0.9.3",
"promise-toolbox": "^0.9.5",
"tmp": "^0.0.33",
"vhd-lib": "^0.0.0"
},
"devDependencies": {
"babel-cli": "^6.18.0",
"babel-plugin-transform-runtime": "^6.15.0",
"babel-preset-env": "^1.0.0",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"mocha": "^4.0.1",
"must": "^0.13.2",
"rimraf": "^2.5.4"
"event-to-promise": "^0.8.0",
"execa": "^0.10.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"dev-test": "mocha --watch --reporter=min \"dist/**/*.spec.js\"",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"test-disabled": "mocha \"dist/**/*.spec.js\""
},
"babel": {
"plugins": [
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
]
]
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}


@@ -1 +1,16 @@
export { convertFromVMDK as default } from './vhd-write'
import { createReadableSparseStream } from 'vhd-lib'
import { VMDKDirectParser, readVmdkGrainTable } from './vmdk-read'
async function convertFromVMDK (vmdkReadStream, table) {
const parser = new VMDKDirectParser(vmdkReadStream)
const header = await parser.readHeader()
return createReadableSparseStream(
header.capacitySectors * 512,
header.grainSizeSectors * 512,
table,
parser.blockIterator()
)
}
export { convertFromVMDK as default, readVmdkGrainTable }


@@ -1,340 +0,0 @@
'use strict'
import { open, write } from 'fs-promise'
import stream from 'stream'
import { VMDKDirectParser } from './vmdk-read'
const footerCookie = 'conectix'
const creatorApp = 'xo '
// it looks like everybody is using Wi2k
const osString = 'Wi2k'
const headerCookie = 'cxsparse'
const fixedHardDiskType = 2
const dynamicHardDiskType = 3
const sectorSize = 512
export function computeChecksum (buffer) {
let sum = 0
for (let i = 0; i < buffer.length; i++) {
sum += buffer[i]
}
// http://stackoverflow.com/a/1908655/72637 the >>> prevents the number from going negative
return ~sum >>> 0
}
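// Worked example: for a buffer containing the bytes [1, 2, 3] the sum is 6,
// and the checksum is ~6 >>> 0 === 4294967289 (0xfffffff9), i.e. the one's
// complement of the byte sum as an unsigned 32-bit value.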
class Block {
constructor (blockSize) {
const bitmapSize = blockSize / sectorSize / 8
const bufferSize =
Math.ceil((blockSize + bitmapSize) / sectorSize) * sectorSize
this.buffer = Buffer.alloc(bufferSize)
this.bitmapBuffer = this.buffer.slice(0, bitmapSize)
this.dataBuffer = this.buffer.slice(bitmapSize)
this.bitmapBuffer.fill(0xff)
}
writeData (buffer, offset = 0) {
buffer.copy(this.dataBuffer, offset)
}
async writeOnFile (file) {
await write(file, this.buffer, 0, this.buffer.length)
}
}
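// Worked example: with the 2 MiB block size used by VHDFile below
// (blockSize = 0x00200000), bitmapSize = 2097152 / 512 / 8 = 512 bytes and
// bufferSize = ceil((2097152 + 512) / 512) * 512 = 2097664 bytes, i.e. one
// 512-byte sector of bitmap followed by the 2 MiB of block data.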
class SparseExtent {
constructor (dataSize, blockSize, startOffset) {
this.table = createEmptyTable(dataSize, blockSize)
this.blockSize = blockSize
this.startOffset = (startOffset + this.table.buffer.length) / sectorSize
}
get entryCount () {
return this.table.entryCount
}
_writeBlock (blockBuffer, tableIndex, offset) {
if (blockBuffer.length + offset > this.blockSize) {
throw new Error('invalid block geometry')
}
let entry = this.table.entries[tableIndex]
if (entry === undefined) {
entry = new Block(this.blockSize)
this.table.entries[tableIndex] = entry
}
entry.writeData(blockBuffer, offset)
}
writeBuffer (buffer, offset = 0) {
const startBlock = Math.floor(offset / this.blockSize)
const endBlock = Math.ceil((offset + buffer.length) / this.blockSize)
for (let i = startBlock; i < endBlock; i++) {
const blockDelta = offset - i * this.blockSize
let blockBuffer, blockOffset
if (blockDelta > 0) {
blockBuffer = buffer.slice(0, (i + 1) * this.blockSize - offset)
blockOffset = blockDelta
} else {
blockBuffer = buffer.slice(
-blockDelta,
(i + 1) * this.blockSize - offset
)
blockOffset = 0
}
this._writeBlock(blockBuffer, i, blockOffset)
}
}
async writeOnFile (file) {
let currentOffset = this.startOffset
for (let i = 0; i < this.table.entryCount; i++) {
const block = this.table.entries[i]
if (block !== undefined) {
this.table.buffer.writeUInt32BE(currentOffset, i * 4)
currentOffset += block.buffer.length / sectorSize
}
}
await write(file, this.table.buffer, 0, this.table.buffer.length)
for (let i = 0; i < this.table.entryCount; i++) {
const block = this.table.entries[i]
if (block !== undefined) {
await block.writeOnFile(file)
}
}
}
}
export class VHDFile {
constructor (virtualSize, timestamp) {
this.geomtry = computeGeometryForSize(virtualSize)
this.timestamp = timestamp
this.blockSize = 0x00200000
this.sparseFile = new SparseExtent(
this.geomtry.actualSize,
this.blockSize,
sectorSize * 3
)
}
writeBuffer (buffer, offset = 0) {
this.sparseFile.writeBuffer(buffer, offset)
}
async writeFile (fileName) {
const fileFooter = createFooter(
this.geomtry.actualSize,
this.timestamp,
this.geomtry,
dynamicHardDiskType,
512,
0
)
const diskHeader = createDynamicDiskHeader(
this.sparseFile.entryCount,
this.blockSize
)
const file = await open(fileName, 'w')
await write(file, fileFooter, 0, fileFooter.length)
await write(file, diskHeader, 0, diskHeader.length)
await this.sparseFile.writeOnFile(file)
await write(file, fileFooter, 0, fileFooter.length)
}
}
export function computeGeometryForSize (size) {
const totalSectors = Math.ceil(size / 512)
let sectorsPerTrack
let heads
let cylinderTimesHeads
if (totalSectors > 65535 * 16 * 255) {
throw Error('disk is too big')
}
// straight copypasta from the file spec appendix on CHS Calculation
if (totalSectors >= 65535 * 16 * 63) {
sectorsPerTrack = 255
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
} else {
sectorsPerTrack = 17
cylinderTimesHeads = totalSectors / sectorsPerTrack
heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
if (heads < 4) {
heads = 4
}
if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
sectorsPerTrack = 31
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
}
if (cylinderTimesHeads >= heads * 1024) {
sectorsPerTrack = 63
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
}
}
const cylinders = Math.floor(cylinderTimesHeads / heads)
const actualSize = cylinders * heads * sectorsPerTrack * sectorSize
return { cylinders, heads, sectorsPerTrack, actualSize }
}
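// Worked example: for a 2 GiB disk (size = 2147483648), totalSectors = 4194304,
// which is below the 65535 * 16 * 63 threshold, so the CHS adjustments settle on
// sectorsPerTrack = 63, heads = 16, cylinders = 4161 and
// actualSize = 4161 * 16 * 63 * 512 = 2147475456 bytes (slightly under 2 GiB).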
export function createFooter (
size,
timestamp,
geometry,
diskType,
dataOffsetLow = 0xffffffff,
dataOffsetHigh = 0xffffffff
) {
const footer = Buffer.alloc(512)
Buffer.from(footerCookie, 'ascii').copy(footer)
footer.writeUInt32BE(2, 8)
footer.writeUInt32BE(0x00010000, 12)
footer.writeUInt32BE(dataOffsetHigh, 16)
footer.writeUInt32BE(dataOffsetLow, 20)
footer.writeUInt32BE(timestamp, 24)
Buffer.from(creatorApp, 'ascii').copy(footer, 28)
Buffer.from(osString, 'ascii').copy(footer, 36)
// do not use & 0xFFFFFFFF to extract lower bits, that would propagate a negative sign if the 2^31 bit is one
const sizeHigh = Math.floor(size / Math.pow(2, 32)) % Math.pow(2, 32)
const sizeLow = size % Math.pow(2, 32)
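// e.g. for a 5 GiB disk (size = 5368709120): sizeHigh = 1 and
// sizeLow = 1073741824, written big-endian as the two 32-bit halves.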
footer.writeUInt32BE(sizeHigh, 40)
footer.writeUInt32BE(sizeLow, 44)
footer.writeUInt32BE(sizeHigh, 48)
footer.writeUInt32BE(sizeLow, 52)
footer.writeUInt16BE(geometry['cylinders'], 56)
footer.writeUInt8(geometry['heads'], 58)
footer.writeUInt8(geometry['sectorsPerTrack'], 59)
footer.writeUInt32BE(diskType, 60)
const checksum = computeChecksum(footer)
footer.writeUInt32BE(checksum, 64)
return footer
}
export function createDynamicDiskHeader (tableEntries, blockSize) {
const header = Buffer.alloc(1024)
Buffer.from(headerCookie, 'ascii').copy(header)
// hard code no next data
header.writeUInt32BE(0xffffffff, 8)
header.writeUInt32BE(0xffffffff, 12)
// hard code table offset
header.writeUInt32BE(0, 16)
header.writeUInt32BE(sectorSize * 3, 20)
header.writeUInt32BE(0x00010000, 24)
header.writeUInt32BE(tableEntries, 28)
header.writeUInt32BE(blockSize, 32)
const checksum = computeChecksum(header)
header.writeUInt32BE(checksum, 36)
return header
}
export function createEmptyTable (dataSize, blockSize) {
const blockCount = Math.ceil(dataSize / blockSize)
const tableSizeSectors = Math.ceil(blockCount * 4 / sectorSize)
const buffer = Buffer.alloc(tableSizeSectors * sectorSize, 0xff)
return { entryCount: blockCount, buffer: buffer, entries: [] }
}
export class ReadableRawVHDStream extends stream.Readable {
constructor (size, vmdkParser) {
super()
this.size = size
const geometry = computeGeometryForSize(size)
this.footer = createFooter(
size,
Math.floor(Date.now() / 1000),
geometry,
fixedHardDiskType
)
this.position = 0
this.vmdkParser = vmdkParser
this.done = false
this.busy = false
this.currentFile = []
}
filePadding (paddingLength) {
if (paddingLength !== 0) {
const chunkSize = 1024 * 1024 // 1 MiB
const chunkCount = Math.floor(paddingLength / chunkSize)
for (let i = 0; i < chunkCount; i++) {
this.currentFile.push(() => {
const paddingBuffer = Buffer.alloc(chunkSize)
return paddingBuffer
})
}
this.currentFile.push(() => {
const paddingBuffer = Buffer.alloc(paddingLength % chunkSize)
return paddingBuffer
})
}
}
async pushNextBlock () {
const next = await this.vmdkParser.next()
if (next === null) {
const paddingLength = this.size - this.position
this.filePadding(paddingLength)
this.currentFile.push(() => this.footer)
this.currentFile.push(() => {
this.done = true
return null
})
} else {
const offset = next.lbaBytes
const buffer = next.grain
const paddingLength = offset - this.position
if (paddingLength < 0) {
process.nextTick(() =>
this.emit(
'error',
'This VMDK file does not have its blocks in the correct order'
)
)
}
this.filePadding(paddingLength)
this.currentFile.push(() => buffer)
this.position = offset + buffer.length
}
return this.pushFileUntilFull()
}
// returns true if the file is empty
pushFileUntilFull () {
while (true) {
if (this.currentFile.length === 0) {
break
}
const result = this.push(this.currentFile.shift()())
if (!result) {
break
}
}
return this.currentFile.length === 0
}
async pushNextUntilFull () {
while (!this.done && (await this.pushNextBlock())) {}
}
_read () {
if (this.busy || this.done) {
return
}
if (this.pushFileUntilFull()) {
this.busy = true
this.pushNextUntilFull()
.then(() => {
this.busy = false
})
.catch(error => {
process.nextTick(() => this.emit('error', error))
})
}
}
}
export async function convertFromVMDK (vmdkReadStream) {
const parser = new VMDKDirectParser(vmdkReadStream)
const header = await parser.readHeader()
return new ReadableRawVHDStream(header.capacitySectors * sectorSize, parser)
}

Some files were not shown because too many files have changed in this diff