Compare commits

...

76 Commits

Author SHA1 Message Date
Julien Fontanet
4bb8ce8779 feat(vhd-lib): 0.1.0 2018-05-21 10:03:37 +02:00
Julien Fontanet
58eb6a8b5f feat(xo-server): 5.19.7 2018-05-18 18:44:34 +02:00
Julien Fontanet
52f6a79e01 fix(xo-server/backupNg/logs): include merge/transfer size (#2965) 2018-05-18 18:44:07 +02:00
Julien Fontanet
129f79d44b feat(xo-web): 5.19.5 2018-05-18 18:42:31 +02:00
Julien Fontanet
385c3eb563 feat(xo-vmdk-to-vhd): 0.1.1 2018-05-18 18:40:26 +02:00
Julien Fontanet
e56be51b45 chore(xo-server/backups-ng): remove incorrect TODO 2018-05-18 17:14:50 +02:00
Olivier Lambert
24ae65b254 fix(xo-server/sr.createNfs): nfsVersion → nfsOptions (#2904) 2018-05-18 16:28:02 +02:00
badrAZ
d5dffbacbd fix(xo-web/FormattedDuration): handle duration < 0 seconds (#2964) 2018-05-18 15:06:23 +02:00
Julien Fontanet
c6ae969a82 fix(xo-server/https): ask for passphrase (#2963)
Fixes #2962
2018-05-18 15:05:49 +02:00
Nicolas Raynaud
005a9fdc01 fix(xo-vmdk-to-vhd): various bugs (#2961) 2018-05-18 14:02:19 +02:00
Jerome Charaoui
f505d4d911 Fix SR creation when using options or NFSv4 (#2960) 2018-05-17 22:12:09 +02:00
badrAZ
8ada6b121e fix(backup-ng/logs): handle the case when transfer duration equals 0 (#2954) 2018-05-17 16:58:29 +02:00
Julien Fontanet
b9a87efb0d fix(xo-server/backupNg): don't fail on corrupted VHDs (#2957)
Corrupted VHD files (usually uncleaned temporary files) could make the whole job fail.
2018-05-17 11:27:02 +02:00
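The gist of the fix, as a hedged sketch (the helper and handler names are illustrative, not the actual xo-server code):

```js
// Hypothetical sketch: warn and skip unreadable VHDs instead of failing the job.
const vhds = []
for (const path of await handler.list(backupDir)) {
  try {
    const vhd = new Vhd(handler, path)
    await vhd.readHeaderAndFooter() // throws on a corrupted footer/header
    vhds.push(vhd)
  } catch (error) {
    console.warn('ignoring corrupted VHD', path, error)
  }
}
```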
Pierre Donias
89485a82d2 feat(xo-web): make many objects' UUID copiable (#2955)
Fixes #2925

- host/tab-network
- pool/tab-network
- vm/tab-disks
- vm/tab-network
- vm/tab-snapshots
2018-05-16 17:39:47 +02:00
Pierre Donias
451f87c6b4 feat(xo-web/servers): allow unauthorized cert. when adding server (#2953)
Fixes #2926
2018-05-16 13:44:27 +02:00
Rajaa.BARHTAOUI
c3cb5a3221 feat(xo-server,xo-web): VM HA options (#2946)
Fixes #2917
2018-05-16 13:27:40 +02:00
Julien Fontanet
458609ed2e feat(xo-server): 5.19.6 2018-05-16 10:32:59 +02:00
Julien Fontanet
fcec8113f3 fix(xo-server/backupNg): await writeStream (#2951) 2018-05-16 10:32:38 +02:00
Julien Fontanet
ebbd882ee4 feat(xo-web): 5.19.4 2018-05-15 17:44:25 +02:00
Julien Fontanet
0506e19a66 chore(xo-server/backups-ng): update todo list 2018-05-15 17:44:09 +02:00
Pierre Donias
ecc62e4f54 fix(xo-web/xosan): install packs button condition (#2950) 2018-05-15 17:40:40 +02:00
Julien Fontanet
2b95eb4e4d feat(xo-web): 5.19.3 2018-05-15 16:11:53 +02:00
Julien Fontanet
bcde9e0f74 feat(xo-server): 5.19.5 2018-05-15 16:11:34 +02:00
Pierre Donias
114501ebc7 feat(XOSAN): allow user to update packs (#2782) 2018-05-15 16:11:04 +02:00
badrAZ
ebab7c0867 fix(backup-ng/logs): handle the case when transfer/merge duration equals 0 (#2949) 2018-05-15 16:10:17 +02:00
Julien Fontanet
0e2270fb6e feat(xo-web): 5.19.2 2018-05-15 14:46:33 +02:00
Julien Fontanet
593493ec0c feat(xo-server): 5.19.4 2018-05-15 14:46:07 +02:00
Julien Fontanet
d92898a806 feat(xo-vmdk-to-vhd): 0.1.0 2018-05-15 14:45:19 +02:00
Julien Fontanet
7890e46551 feat(xo-server-backup-reports): 0.11.0 2018-05-15 14:42:32 +02:00
Julien Fontanet
ef942a6209 feat(Backup NG): implement logs and reports (#2869) 2018-05-15 14:40:11 +02:00
Nicolas Raynaud
fdde916388 feat(xo-web/vms-import): redirect to VM or home page (#2942)
If a single VM has been imported, redirect to its page.

If multiple VMs have been imported, redirect to the homepage with all other VMs filtered out.
2018-05-14 17:42:11 +02:00
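A sketch of that redirect rule (the routes and filter syntax are illustrative, not necessarily xo-web's actual ones):

```js
// Hypothetical sketch of the post-import redirect.
function onImportSuccess (vmIds) {
  if (vmIds.length === 1) {
    redirect(`/vms/${vmIds[0]}`) // single VM: straight to its page
  } else {
    // several VMs: home view, filtered down to the imported ones
    const filter = vmIds.map(id => `id:${id}`).join(' ')
    redirect(`/home?s=${encodeURIComponent(filter)}`)
  }
}
```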
Julien Fontanet
31314d201b fix(xo-server/backupNg/delta): await deletion/merge 2018-05-14 15:38:11 +02:00
Julien Fontanet
a29a949c51 fix(xo-server/backupNg/delta): deleteFirst iff retention > 1 2018-05-14 15:37:09 +02:00
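The `iff retention > 1` guard reads as a safety rule: deleting the previous backup before exporting saves space, but is only acceptable when another copy remains if the transfer fails. A minimal sketch, assuming that reading (names hypothetical):

```js
// Hypothetical sketch: delete before export only when another copy will remain.
const deleteFirst = retention > 1
if (deleteFirst) await deleteOldestBackup() // frees space up front
await transferNewBackup()
if (!deleteFirst) await deleteOldestBackup() // safe: the new copy now exists
```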
Julien Fontanet
cc1ce8c5f8 chore: update yarn.lock 2018-05-14 13:53:03 +02:00
Nicolas Raynaud
a21bf4ebe5 chore: major VHD code restructuring (#2808)
Related to #2792
2018-05-14 13:48:16 +02:00
Julien Fontanet
3d0420dbd9 fix(xo-server/backupNg): clean metadata on VM itself (#2945) 2018-05-14 11:47:34 +02:00
Julien Fontanet
04c74dd30f fix: missing bit of last commit 2018-05-11 20:17:02 +02:00
Julien Fontanet
2f256291ae fix(xo-server/backup legacy/delta import): autofix path (#2941)
Because the path might be incorrect and be `_full.vhd` instead of `_delta.vhd`.

I know…
2018-05-11 20:16:00 +02:00
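A sketch of such an autofix (the existence check and names are hypothetical, not the actual xo-server code):

```js
// Hypothetical sketch: fall back to the _full.vhd name when _delta.vhd is missing.
let path = `${basePath}_delta.vhd`
if (!(await handler.exists(path))) {
  path = `${basePath}_full.vhd` // some backups were written under the wrong name
}
```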
Julien Fontanet
bcb66a4145 fix(xo-server/backup NG/listReplicatedVms): avoid templates and snapshots 2018-05-11 18:45:47 +02:00
Fabrice Marsaud
2d9368062e fix(xo-web/xoa-updater): don't block paying plans when updater is on error (#2939) 2018-05-11 17:18:24 +02:00
Pierre Donias
b110bacf61 fix(xo-server/patches): bulk install (#2935) 2018-05-09 17:32:11 +02:00
Julien Fontanet
78afdc0af5 feat(xo-web): 5.19.1 2018-05-07 13:19:59 +02:00
Julien Fontanet
ad6cd7985a feat(xo-server): 5.19.3 2018-05-07 13:07:21 +02:00
Julien Fontanet
a61661776d fix(xo-server/listVmBackupsNg): handle missing vhds field
This field is only present on delta backups
2018-05-07 11:02:14 +02:00
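Consumers therefore have to guard the access; a one-line sketch of the kind of fix implied:

```js
// Hypothetical sketch: `vhds` only exists on delta backups.
const vhds = backup.vhds === undefined ? [] : Object.values(backup.vhds)
```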
Julien Fontanet
1a9ebddcab fix(xo-server/listBackupNgPartitionFiles): missing await
Strangely though, it works in dev compilation mode…

Fixes #2929
2018-05-07 10:33:44 +02:00
Julien Fontanet
7ab907a854 feat(xo-server/backup NG): file restore (#2889) 2018-05-06 18:38:47 +02:00
Olivier Lambert
68a34f7cdb feat(changelog): update changelog 2018-05-05 12:05:56 +02:00
Rajaa.BARHTAOUI
da4ff3082d feat(xo-web/health): list VM snapshots related to non-existing backup jobs (#2899)
Fixes #2828
2018-05-04 15:59:11 +02:00
Rajaa.BARHTAOUI
9c05a59b5f feat(xo-web/SR/advanced): add VDI UUID in coalesce table (#2919)
Fixes #2903
2018-05-04 12:28:15 +02:00
Rajaa.BARHTAOUI
6780146505 feat(xo-web/patches): better "restart host" warnings (#2909)
Fixes #2866
2018-05-04 10:41:02 +02:00
Julien Fontanet
2758833fc6 feat(xo-server): 5.19.2 2018-05-03 19:12:48 +02:00
Julien Fontanet
2786d7ec46 fix(vhd/createSyntheticReadStream): sectorsPerBlock → sectorsPerBlockData 2018-05-03 19:11:01 +02:00
Julien Fontanet
945a2006c9 feat(xo-server/legacy backup/import): use synthetic stream (#2913) 2018-05-02 17:48:13 +02:00
badrAZ
b9e574e32f fix(SR/tab-stats): fix IOPS's and IOwait's values format (#2914) 2018-05-02 14:17:13 +02:00
Julien Fontanet
34f1ef1680 feat(xo-server): 5.19.1 2018-05-01 17:26:15 +02:00
Julien Fontanet
4ac4310bc1 fix(xo-server/importDeltaVm): remove extra return
It prevented the VBD creation in case of a new VDI.
2018-05-01 17:25:46 +02:00
Julien Fontanet
a10997ca66 feat(xo-web): 5.19.0 2018-05-01 16:13:20 +02:00
Julien Fontanet
0e52a4c7dc feat(xo-server): 5.19.0 2018-05-01 16:12:44 +02:00
Julien Fontanet
a4b3e22c2b feat(xo-server-perf-alert): 0.1.0 2018-05-01 16:10:10 +02:00
Julien Fontanet
441bd7c754 feat(xo-server-auth-saml): 0.5.2 2018-05-01 16:09:06 +02:00
badrAZ
ddbd32d1cb chore(xo-web/backup-ng/new): single effect to toggle modes (#2908) 2018-04-30 11:19:45 +02:00
Pierre Donias
a5b0cbeaea feat(xo-server-perf-alert): SR disk space (#2905) 2018-04-27 17:38:19 +02:00
Rajaa.BARHTAOUI
c6f3b2b1ce feat(xo-web/XOA update): display "Downgrade" when trial is over (#2845)
Fixes #1483
2018-04-27 10:05:27 +02:00
Pierre Donias
3d869d9fa1 chore(xo-web/health): remove irrelevant actions on VDIs (#2882) 2018-04-26 17:37:28 +02:00
Julien Fontanet
7a5229741f chore: disable tests on Node 9 due to upath error 2018-04-26 17:21:00 +02:00
Rajaa.BARHTAOUI
78e0c2d8fa feat(xo-web/SortedTable): support link actions (#2893) 2018-04-26 15:46:10 +02:00
Pierre Donias
5928984069 feat(xo-web/home): sort by container name (#2901)
Fixes #2680
2018-04-26 15:21:48 +02:00
Julien Fontanet
61a472f108 chore(xo-server/vhd/createReadStream): improve genericity (#2865)
It should now be pretty easy to make it generate a delta VHD, which should be very useful when merging multiple deltas together (during deletion).
2018-04-24 18:06:58 +02:00
Julien Fontanet
e45f78ea20 fix(xo-web/backup-ng): delete backups sequentially (#2855)
- sequentially: to limit merge issues
- from newest to oldest: to avoid unnecessary merges
2018-04-23 16:35:34 +02:00
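A sketch of that ordering, assuming a `timestamp` field on each backup record:

```js
// Hypothetical sketch: newest first, then strictly sequential deletion.
backups.sort((a, b) => b.timestamp - a.timestamp)
for (const backup of backups) {
  await deleteBackup(backup) // one at a time: avoids concurrent VHD merges
}
```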
Olivier Lambert
b3ae9d88eb feat(vm): expose vendor device features in advanced tab. Fixes #2883 (#2894) 2018-04-23 15:02:40 +02:00
Pierre Donias
f7f26537be fix(xo-web/vm/network): bad import after file name change (#2892)
Fixes #2891

Introduced by 769c32a1b1
2018-04-23 14:08:56 +02:00
Julien Fontanet
96848fc6d4 fix(xo-server/importDeltaVm): create VBDs earlier (#2885)
To avoid orphan VDIs as much as possible.
2018-04-20 17:33:36 +02:00
Julien Fontanet
51e6f0c79f feat(xo-server-usage-report): 0.4.2 2018-04-19 09:39:47 +02:00
badrAZ
4f94ad40b7 fix(xo-server-usage-report): handle missing metrics (#2880) 2018-04-18 16:30:30 +02:00
Pierre Donias
43e1eb9939 fix(xo-web/Ellipsis): handle patchedRender wrapping (#2881)
Also use Ellipsis on resource set name in home/VM view
2018-04-18 11:00:09 +02:00
Julien Fontanet
1f6d7de861 feat(xo-server-usage-report): 0.4.1 2018-04-17 14:03:32 +02:00
136 changed files with 6342 additions and 3952 deletions

.gitignore

@@ -8,6 +8,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/plot.dat
/packages/xo-server/.xo-server.*


@@ -1,6 +1,6 @@
language: node_js
node_js:
- stable
#- stable # disable for now due to an issue of indirect dep upath with Node 9
- 8
- 6
@@ -12,6 +12,7 @@ addons:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter
before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash


@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)


@@ -0,0 +1,54 @@
{
"name": "@xen-orchestra/fs",
"version": "0.0.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.44",
"@marsaud/smb2-promise": "^0.2.1",
"execa": "^0.10.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.9.5",
"through2": "^2.0.3",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}


@@ -1,11 +1,11 @@
// @flow
import getStream from 'get-stream'
import { randomBytes } from 'crypto'
import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
import { type Readable, type Writable } from 'stream'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
import { getPseudoRandomBytes, streamToBuffer } from '../utils'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
type Data = Buffer | Readable | string
@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
async test (): Promise<Object> {
const testFileName = `${Date.now()}.test`
const data = getPseudoRandomBytes(1024 * 1024)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
let step = 'write'
try {
await this.outputFile(testFileName, data)
@@ -97,7 +97,7 @@ export default class RemoteHandlerAbstract {
}
_readFile (file: string, options?: Object): Promise<Buffer> {
return this.createReadStream(file, options).then(streamToBuffer)
return this.createReadStream(file, options).then(getStream.buffer)
}
async rename (


@@ -0,0 +1,26 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import { getHandler } from '.'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test("fs test doesn't crash", async () => {
const handler = getHandler({ url: 'file://' + process.cwd() })
const result = await handler.test()
expect(result.success).toBeTruthy()
})


@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'
export type { default as RemoteHandler } from './abstract'
export type Remote = { url: string }
const HANDLERS = {


@@ -1,7 +1,9 @@
import Smb2 from '@marsaud/smb2-promise'
import { lastly as pFinally } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
import { noop, pFinally } from '../utils'
const noop = () => {}
// Normalize the error code for file not found.
const normalizeError = error => {


@@ -1,9 +1,104 @@
# ChangeLog
## **5.20.0** (planned 2018-05-31)
### Enhancements
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
### Bugs
## **5.19.0** (2018-05-01)
### Enhancements
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
### Bugs
- Allowed-ips don't work, displaying `index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function` [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
## **5.18.0** (2018-03-31)
### Enhancements
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
- UI Enhancement - VM list - Always show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
### Bugs
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
- [Basic backup] Continuous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
## **5.17.0** (2018-03-02)
### Enhancements
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
@@ -22,6 +117,9 @@
- Can't attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
### Bugs
@@ -41,6 +139,7 @@
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
@@ -50,6 +149,8 @@
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
### Bugs
@@ -79,6 +180,7 @@
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
### Bugs

flow-typed/lodash.js

@@ -1,4 +1,12 @@
declare module 'lodash' {
declare export function forEach<K, V>(
object: { [K]: V },
iteratee: (V, K) => void
): void
declare export function groupBy<K, V>(
object: { [K]: V },
iteratee: K | ((V, K) => string)
): { [string]: V[] }
declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
declare export function isEmpty(mixed): boolean
declare export function keyBy<T>(array: T[], iteratee: string): boolean
@@ -12,5 +20,10 @@ declare module 'lodash' {
iteratee: (V1, K) => V2
): { [K]: V2 }
declare export function noop(...args: mixed[]): void
declare export function some<T>(
collection: T[],
iteratee: (T, number) => boolean
): boolean
declare export function sum(values: number[]): number
declare export function values<K, V>(object: { [K]: V }): V[]
}


@@ -5,6 +5,9 @@ declare module 'promise-toolbox' {
reject: T => void,
resolve: T => void,
|}
declare export function fromCallback<T>(
(cb: (error: any, value: T) => void) => void
): Promise<T>
declare export function fromEvent(emitter: mixed, string): Promise<mixed>
declare export function ignoreErrors(): Promise<void>
declare export function timeout<T>(delay: number): Promise<T>


@@ -34,16 +34,18 @@
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/dist/",
"/xo-vmdk-to-vhd/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"transform": {
"/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
"/@xen-orchestra/fs/.+\\.jsx?$": "babel-7-jest",
"/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/vhd-lib/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-vmdk-to-vhd/.+\\.jsx?$": "babel-7-jest",
"\\.jsx?$": "babel-jest"
}
},


@@ -1,6 +1,6 @@
{
"name": "vhd-cli",
"version": "0.0.0",
"version": "0.0.1",
"license": "ISC",
"description": "",
"keywords": [],
@@ -26,10 +26,11 @@
"node": ">=4"
},
"dependencies": {
"struct-fu": "^1.2.0",
"@nraynaud/xo-fs": "^0.0.5",
"@xen-orchestra/fs": "^0.0.0",
"babel-runtime": "^6.22.0",
"exec-promise": "^0.7.0"
"exec-promise": "^0.7.0",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.1.0"
},
"devDependencies": {
"babel-cli": "^6.24.1",
@@ -38,14 +39,18 @@
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
"execa": "^0.10.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.9.5",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
"prepare": "yarn run build"
},
"babel": {
"plugins": [


@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const handler = getHandler({ url: 'file:///' })
for (const vhd of args) {
try {
await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
console.log('ok:', vhd)
} catch (error) {
console.error('nok:', vhd, error)
}
}
}


@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))
await vhd.readHeaderAndFooter()
console.log(vhd.header)
console.log(vhd.footer)
}


@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
export default async function main (args) {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <input VHD> <output VHD>`
}
const handler = getHandler({ url: 'file:///' })
return new Promise((resolve, reject) => {
createSyntheticStream(handler, path.resolve(args[0]))
.on('error', reject)
.pipe(
createWriteStream(args[1])
.on('error', reject)
.on('finish', resolve)
)
})
}


@@ -1,19 +1,44 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'
import Vhd from './vhd'
import commands from './commands'
execPromise(async args => {
const vhd = new Vhd(
new RemoteHandlerLocal({ url: 'file:///' }),
resolve(args[0])
function runCommand (commands, [command, ...args]) {
if (command === undefined || command === '-h' || command === '--help') {
command = 'help'
}
const fn = commands[command]
if (fn === undefined) {
if (command === 'help') {
return `Usage:
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${this.command} ${command}`)
.join('\n\n')}`
}
throw `invalid command ${command}` // eslint-disable-line no-throw-literal
}
return fn.call(
{
__proto__: this,
command: `${this.command} ${command}`,
},
args
)
}
await vhd.readHeaderAndFooter()
console.log(vhd._header)
console.log(vhd._footer)
})
execPromise(
runCommand.bind(
{
command: 'vhd-cli',
runCommand,
},
commands
)
)


@@ -0,0 +1,28 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import command from './commands/info'
const initialDir = process.cwd()
jest.setTimeout(10000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('can run the command', async () => {
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
await command(['empty.vhd'])
})


@@ -1,461 +0,0 @@
import assert from 'assert'
import fu from 'struct-fu'
import { dirname } from 'path'
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
/* eslint-disable no-unused-vars */
const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512
/* eslint-enable no-unused-vars */
// ===================================================================
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
fu.struct('dataOffset', [
fu.uint32('high'), // 16
fu.uint32('low'), // 20
]),
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
fu.struct('originalSize', [
// At the creation, current size of the hard disk.
fu.uint32('high'), // 40
fu.uint32('low'), // 44
]),
fu.struct('currentSize', [
// Current size of the virtual disk. At the creation: currentSize = originalSize.
fu.uint32('high'), // 48
fu.uint32('low'), // 52
]),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
fu.struct('tableOffset', [
// Absolute byte offset of the Block Allocation Table.
fu.uint32('high'),
fu.uint32('low'),
]),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.byte('reserved1', 4),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
fu.struct('platformDataOffset', [
// Absolute byte offset of the locator data.
fu.uint32('high'),
fu.uint32('low'),
]),
],
8
),
fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size
// ===================================================================
// Helpers
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
map[bit >> 3] &= ~(1 << (bit & 7))
}
const addOffsets = (...offsets) =>
offsets.reduce(
(a, b) =>
b == null
? a
: typeof b === 'object'
? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
: { bytes: a.bytes + b, bits: a.bits },
{ bytes: 0, bits: 0 }
)
const pack = (field, value, buf, offset) => {
field.pack(value, buf, addOffsets(field.offset, offset))
}
const unpack = (field, buf, offset) =>
field.unpack(buf, addOffsets(field.offset, offset))
// ===================================================================
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
const streamToExistingBuffer = (
stream,
buffer,
offset = 0,
end = buffer.length
) =>
new Promise((resolve, reject) => {
assert(offset >= 0)
assert(end > offset)
assert(end <= buffer.length)
let i = offset
const onData = chunk => {
const prev = i
i += chunk.length
if (i > end) {
return onError(new Error('too much data'))
}
chunk.copy(buffer, prev)
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(i - offset)
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
// ===================================================================
// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumField = struct.fields.checksum
const checksumOffset = offset + checksumField.offset
for (let i = offset, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = offset + struct.size;
i < n;
++i
) {
sum += buf[i]
}
return ~sum >>> 0
}
const verifyChecksum = (struct, buf, offset) =>
unpack(struct.fields.checksum, buf, offset) ===
computeChecksum(struct, buf, offset)
const getParentLocatorSize = parentLocatorEntry => {
const { platformDataSpace } = parentLocatorEntry
if (platformDataSpace < SECTOR_SIZE) {
return platformDataSpace * SECTOR_SIZE
}
return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}
// ===================================================================
// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]
export default class Vhd {
constructor (handler, path) {
this._handler = handler
this._path = path
this._blockAllocationTable = null
this._blockBitmapSize = null
this._footer = null
this._header = null
this._parent = null
this._sectorsPerBlock = null
}
// Read `length` bytes starting from `begin`.
//
// - if `buffer`: it is filled starting from `offset`, and the
// number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_read (begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
return this._handler
.createReadStream(this._path, {
end: begin + length - 1,
start: begin,
})
.then(
buf
? stream =>
streamToExistingBuffer(
stream,
buf,
offset,
(offset || 0) + length
)
: streamToNewBuffer
)
}
// - if `buffer`: it is filled with 0 starting from `offset`, and
// the number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_zeroes (length, buf, offset = 0) {
if (buf) {
assert(offset >= 0)
assert(length > 0)
const end = offset + length
assert(end <= buf.length)
buf.fill(0, offset, end)
return Promise.resolve(length)
}
return Promise.resolve(Buffer.alloc(length))
}
// Return the position of a block in the VHD or undefined if not found.
_getBlockAddress (block) {
assert(block >= 0)
assert(block < this._header.maxTableEntries)
const blockAddr = this._blockAllocationTable[block]
if (blockAddr !== 0xffffffff) {
return blockAddr * SECTOR_SIZE
}
}
// -----------------------------------------------------------------
async readHeaderAndFooter () {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
if (!verifyChecksum(fuFooter, buf)) {
throw new Error('footer checksum does not match')
}
if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
throw new Error('header checksum does not match')
}
return this._initMetadata(
unpack(fuHeader, buf, FOOTER_SIZE),
unpack(fuFooter, buf)
)
}
async _initMetadata (header, footer) {
const sectorsPerBlock = header.blockSize / SECTOR_SIZE
assert(sectorsPerBlock % 1 === 0)
// 1 bit per sector, rounded up to full sectors
this._blockBitmapSize =
Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
assert(this._blockBitmapSize === SECTOR_SIZE)
this._footer = footer
this._header = header
this.size = uint32ToUint64(this._footer.currentSize)
if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
const parent = new Vhd(
this._handler,
`${dirname(this._path)}/${header.parentUnicodeName}`
)
await parent.readHeaderAndFooter()
await parent.readBlockAllocationTable()
this._parent = parent
}
}
// -----------------------------------------------------------------
async readBlockAllocationTable () {
const { maxTableEntries, tableOffset } = this._header
const fuTable = fu.uint32(maxTableEntries)
this._blockAllocationTable = unpack(
fuTable,
await this._read(uint32ToUint64(tableOffset), fuTable.size)
)
}
// -----------------------------------------------------------------
// read a single sector in a block
async _readBlockSector (block, sector, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
assert(begin + length <= SECTOR_SIZE)
const blockAddr = this._getBlockAddress(block)
const blockBitmapSize = this._blockBitmapSize
const parent = this._parent
if (
blockAddr &&
(!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
) {
return this._read(
blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
length,
buf,
offset
)
}
return parent
? parent._readBlockSector(block, sector, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
_readBlock (block, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
const { blockSize } = this._header
assert(begin + length <= blockSize)
const blockAddr = this._getBlockAddress(block)
const parent = this._parent
if (!blockAddr) {
return parent
? parent._readBlock(block, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
if (!parent) {
return this._read(
blockAddr + this._blockBitmapSize + begin,
length,
buf,
offset
)
}
// FIXME: we should read as many sectors in a single pass as
// possible for maximum perf.
const [sector, beginInSector] = div(begin, SECTOR_SIZE)
return this._readBlockSector(
block,
sector,
beginInSector,
Math.min(length, SECTOR_SIZE - beginInSector),
buf,
offset
)
}
read (buf, begin, length = buf.length, offset) {
assert(Buffer.isBuffer(buf))
assert(begin >= 0)
const { size } = this
if (begin >= size) {
return Promise.resolve(0)
}
const { blockSize } = this._header
const [block, beginInBlock] = div(begin, blockSize)
return this._readBlock(
block,
beginInBlock,
Math.min(length, blockSize - beginInBlock, size - begin),
buf,
offset
)
}
}


@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)


@@ -0,0 +1,56 @@
{
"name": "vhd-lib",
"version": "0.1.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.44",
"@xen-orchestra/fs": "^0.0.0",
"async-iterator-to-stream": "^1.0.2",
"execa": "^0.10.0",
"from2": "^2.3.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.9.5",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1",
"tmp": "^0.0.33"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"fs-promise": "^2.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}


@@ -0,0 +1,7 @@
const MASK = 0x80
export const set = (map, bit) => {
map[bit >> 3] |= MASK >> (bit & 7)
}
export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
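Note the bit order: unlike the LSB-first `testBit`/`setBit` helpers in the removed `vhd.js` above, these helpers map bit 0 to the most significant bit of byte 0, which is how the VHD spec lays out block bitmaps. For instance:

```js
import { set, test } from './_bitmap'

const map = Buffer.alloc(1)
set(map, 0) // sets the most significant bit of the first byte
console.log(map[0].toString(16)) // '80'
console.log(test(map, 0), test(map, 1)) // true false
```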


@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'
export default function computeGeometryForSize (size) {
const totalSectors = Math.ceil(size / 512)
let sectorsPerTrackCylinder
let heads
let cylinderTimesHeads
if (totalSectors > 65535 * 16 * 255) {
throw Error('disk is too big')
}
// straight copypasta from the file spec appendix on CHS Calculation
if (totalSectors >= 65535 * 16 * 63) {
sectorsPerTrackCylinder = 255
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
} else {
sectorsPerTrackCylinder = 17
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
if (heads < 4) {
heads = 4
}
if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
sectorsPerTrackCylinder = 31
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
if (cylinderTimesHeads >= heads * 1024) {
sectorsPerTrackCylinder = 63
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
}
const cylinders = Math.ceil(cylinderTimesHeads / heads)
const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
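Tracing the function for a 1 GiB disk: 2 097 152 sectors take the small-disk branch and settle on 63 sectors per track and 16 heads, so the image size is rounded up to the next whole CHS geometry:

```js
computeGeometryForSize(1024 * 1024 * 1024)
// → { cylinders: 2081, heads: 16, sectorsPerTrackCylinder: 63,
//     actualSize: 1073995776 } // slightly above 1 GiB (1073741824)
```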


@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff
// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '
// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'
export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4
export const PARENT_LOCATOR_ENTRIES = 8
export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16


@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'
import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_FIXED,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PLATFORM_WI2K,
} from './_constants'
export function createFooter (
size,
timestamp,
geometry,
dataOffset,
diskType = DISK_TYPE_FIXED
) {
const footer = fuFooter.pack({
cookie: FOOTER_COOKIE,
features: 2,
fileFormatVersion: FILE_FORMAT_VERSION,
dataOffset,
timestamp,
creatorApplication: CREATOR_APPLICATION,
creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
originalSize: size,
currentSize: size,
diskGeometry: geometry,
diskType,
uuid: generateUuid(null, []),
})
checksumStruct(footer, fuFooter)
return footer
}
export function createHeader (
maxTableEntries,
tableOffset = HEADER_SIZE + FOOTER_SIZE,
blockSize = VHD_BLOCK_SIZE_BYTES
) {
const header = fuHeader.pack({
cookie: HEADER_COOKIE,
tableOffset,
headerVersion: HEADER_VERSION,
maxTableEntries,
blockSize,
})
checksumStruct(header, fuHeader)
return header
}


@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
const uint64Undefinable = fu.derive(
fu.uint32(2),
number =>
number === undefined
? [0xffffffff, 0xffffffff]
: [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ =>
_[0] === 0xffffffff && _[1] === 0xffffffff
? undefined
: _[0] * SIZE_OF_32_BITS + _[1]
)
export const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64Undefinable('dataOffset'), // offset of the header
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
fu.char('reserved', 426), // 86
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
export const fuHeader = fu.struct([
fu.char('cookie', 8),
uint64Undefinable('dataOffset'),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)
export const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
export const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
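Per the VHD spec, the checksum is the one's complement of the byte sum of the structure, with the checksum field itself excluded; the `>>> 0` folds the result back into an unsigned 32-bit integer. For example:

```js
// If the non-checksum bytes of a struct sum to 300:
console.log((~300 >>> 0).toString(16)) // 'fffffed3', i.e. 2 ** 32 - 1 - 300
```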


@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'
export default async function chain (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockAllocationTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
await childVhd.writeHeader()
await childVhd.writeFooter()
}
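A hedged usage sketch, with illustrative paths, re-parenting a child VHD onto a parent stored on the same handler:

```js
import { chainVhd } from 'vhd-lib'

// Turns child.vhd into a differencing disk whose parent locator
// points at parent.vhd (relative to the child's directory).
await chainVhd(handler, 'backups/parent.vhd', handler, 'backups/child.vhd')
```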


@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
export default asyncIteratorToStream(async function * (size, blockParser) {
const geometry = computeGeometryForSize(size)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry
)
let position = 0
function * filePadding (paddingLength) {
if (paddingLength > 0) {
const chunkSize = 1024 * 1024 // 1 MiB
for (
let paddingPosition = 0;
paddingPosition + chunkSize < paddingLength;
paddingPosition += chunkSize
) {
yield Buffer.alloc(chunkSize)
}
yield Buffer.alloc(paddingLength % chunkSize)
}
}
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.offsetBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield * filePadding(paddingLength)
yield next.data
position = next.offsetBytes + next.data.length
}
yield * filePadding(actualSize - position)
yield footer
})
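Since `asyncIteratorToStream` turns the generator into a stream factory, the export can be called directly and piped. A sketch with a hypothetical one-block parser:

```js
import { createWriteStream } from 'fs'
import { createReadableRawStream } from 'vhd-lib'

// Hypothetical parser: one 1 KiB block at offset 0, then null to signal the end.
const blocks = [{ offsetBytes: 0, data: Buffer.alloc(1024, 1) }]
const blockParser = { next: async () => blocks.shift() || null }

createReadableRawStream(1024 * 1024, blockParser) // a 1 MiB fixed VHD
  .pipe(createWriteStream('disk.vhd'))
```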


@@ -0,0 +1,126 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
 * Fills the block allocation table (`bat`) in place: each VHD block touched by
 * `blockAddressList` is assigned its future position, in sectors, inside the
 * output file, starting from `firstBlockPosition`.
 */
function createBAT (
firstBlockPosition,
blockAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % 512, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
})
}
export default asyncIteratorToStream(async function * (
diskSize,
incomingBlockSize,
blockAddressList,
blockIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
throw new Error(
`Can't import file, grain size / block size ratio is > 53 (${ratio})`
)
}
const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512
const batPosition = FOOTER_SIZE + HEADER_SIZE
const firstBlockPosition = batPosition + tablePhysicalSizeBytes
const geometry = computeGeometryForSize(diskSize)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry,
FOOTER_SIZE,
DISK_TYPE_DYNAMIC
)
const header = createHeader(
maxTableEntries,
batPosition,
VHD_BLOCK_SIZE_BYTES
)
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
createBAT(firstBlockPosition, blockAddressList, ratio, bat, bitmapSize)
let position = 0
function * yieldAndTrack (buffer, expectedPosition) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
async function * generateFileContent (blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield * yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * 512
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
currentVhdBlockIndex = batIndex
}
const blockOffset = (next.offsetBytes / 512) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + next.offsetBytes % VHD_BLOCK_SIZE_BYTES
)
}
yield * yieldAndTrack(currentBlockWithBitmap)
}
yield * yieldAndTrack(footer, 0)
yield * yieldAndTrack(header, FOOTER_SIZE)
yield * yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield * generateFileContent(blockIterator, bitmapSize, ratio)
yield * yieldAndTrack(footer)
})


@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'
import Vhd from './vhd'
import {
BLOCK_UNUSED,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const resolveRelativeFromFile = (file, path) =>
resolve('/', dirname(file), path).slice(1)
export default asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockAllocationTable()
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// this is the root VHD
const rootVhd = vhds[nVhds - 1]
// data of our synthetic VHD
// TODO: set parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
parentUuid: rootVhd.header.parentUuid,
}
const bat = Buffer.allocUnsafe(vhd.batSize)
let footer = {
...vhd.footer,
dataOffset: FOOTER_SIZE,
diskType: rootVhd.footer.diskType,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil(
(header.tableOffset + bat.length) / SECTOR_SIZE
);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
// TODO: for generic usage the bitmap needs to be properly computed for each block
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
const isRootVhd = vhd === rootVhd
if (!vhd.containsBlock(iBlock)) {
if (isRootVhd) {
yield Buffer.alloc((n - i) * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, i, n)
}
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (isRootVhd) {
yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createSyntheticStream, closeFd', i, error)
})
}
}
})
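
A minimal usage sketch: given a local handler (as in the tests below) and the leaf of a VHD chain, the stream walks the parent links via parentUnicodeName and emits one self-contained dynamic VHD. The file names here are hypothetical:

import { createWriteStream } from 'fs'
import { fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import createSyntheticStream from './createSyntheticStream'

async function flattenChain () {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createSyntheticStream(handler, 'child.vhd')
  await fromEvent(stream.pipe(createWriteStream('flat.vhd')), 'finish')
}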

View File

@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
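
Taken together, these exports form the package's public surface; a small consumption sketch (the handler URL and file name are hypothetical):

import { getHandler } from '@xen-orchestra/fs'
import Vhd, { createSyntheticStream, mergeVhd } from 'vhd-lib'

async function inspect () {
  const handler = getHandler({ url: 'file:///var/backups' })
  const vhd = new Vhd(handler, 'disk.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  return vhd.footer.diskType
}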

View File

@@ -2,25 +2,25 @@
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent } from 'promise-toolbox'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'
import LocalHandler from './remote-handlers/local'
import vhdMerge, {
chainVhd,
createReadStream,
Vhd,
VHD_SECTOR_SIZE,
} from './vhd-merge'
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'
const initialDir = process.cwd()
jest.setTimeout(60000)
beforeEach(async () => {
const dir = await tmpDir()
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
@@ -57,11 +57,11 @@ test('blocks can be moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -70,20 +70,18 @@ test('blocks can be moved', async () => {
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb =>
randomBytes(VHD_SECTOR_SIZE, cb)
)
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
// we recover the data manually for speed reasons.
@@ -93,7 +91,7 @@ test('the BAT MSB is not used for sign', async () => {
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
await vhd2.readHeaderAndFooter()
await vhd2.readBlockTable()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
const entry = vhd._getBatEntry(i)
if (entry !== 0xffffffff) {
@@ -110,7 +108,7 @@ test('the BAT MSB is not used for sign', async () => {
} finally {
fs.close(recoveredFile)
}
const recovered = await streamToBuffer(
const recovered = await getStream.buffer(
await fs.createReadStream('recovered', {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
@@ -124,11 +122,11 @@ test('writeData on empty file', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
@@ -139,11 +137,11 @@ test('writeData in 2 non-overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
await newVhd.writeData(
@@ -159,11 +157,11 @@ test('writeData in 2 overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
const startSecondWrite = 2
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
@@ -179,11 +177,11 @@ test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -203,7 +201,7 @@ test('coalesce works with empty parent files', async () => {
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
@@ -226,11 +224,11 @@ test('coalesce works in normal cases', async () => {
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
@@ -242,7 +240,7 @@ test('coalesce works in normal cases', async () => {
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
@@ -261,7 +259,7 @@ test('coalesce works in normal cases', async () => {
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
@@ -270,15 +268,16 @@ test('coalesce works in normal cases', async () => {
)
})
test('createReadStream passes vhd-util check', async () => {
test('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const stream = createReadStream(handler, 'randomfile.vhd')
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
)
await checkFile('recovered.vhd')
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})
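
The helpers these tests rely on (createRandomFile, convertFromRawToVhd, recoverRawContent, checkFile) are elided from this diff; the following are plausible sketches, not the repository's actual implementations, and Linux-specific where dd is involved:

import execa from 'execa'
import fs from 'fs-extra'

const createRandomFile = (name, sizeMB) =>
  execa('dd', ['if=/dev/urandom', `of=${name}`, 'bs=1M', `count=${sizeMB}`])

const convertFromRawToVhd = (rawName, vhdName) =>
  execa('qemu-img', ['convert', '-f', 'raw', '-O', 'vpc', rawName, vhdName])

const checkFile = vhdName => execa('vhd-util', ['check', '-t', '-i', '-n', vhdName])

// convert back to raw, then trim: VHD rounds the virtual size up
async function recoverRawContent (vhdName, rawName, originalSize) {
  await execa('qemu-img', ['convert', '-f', 'vpc', '-O', 'raw', vhdName, rawName])
  await fs.truncate(rawName, originalSize)
}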

View File

@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(
parentDiskType === DISK_TYPE_DIFFERENCING ||
parentDiskType === DISK_TYPE_DYNAMIC
)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
// Read allocation table of child/parent.
await Promise.all([
parentVhd.readBlockAllocationTable(),
childVhd.readBlockAllocationTable(),
])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
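
Usage sketch: coalescing a delta (child) into its full parent in place, here with both files on the same local handler (assumed to come from @xen-orchestra/fs; paths are hypothetical):

import { getHandler } from '@xen-orchestra/fs'
import mergeVhd from './merge'

async function coalesce () {
  const handler = getHandler({ url: 'file:///var/backups' })
  // resolves to the number of bytes of block data merged into the parent
  const mergedDataSize = await mergeVhd(handler, 'parent.vhd', handler, 'child.vhd')
  console.log(`merged ${mergedDataSize} bytes`)
}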

View File

@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'
import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {
cylinders: 3,
heads: 4,
sectorsPerTrack: 17,
})
})
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
offsetBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
const fileSize = 1000
const stream = createReadableRawVHDStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
offsetBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
return expect(
new Promise((resolve, reject) => {
const stream = createReadableRawVHDStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
})
).rejects.toThrow('Received out of order blocks')
})
test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
offsetBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
offsetBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
const fileSize = blockSize * 110
const stream = createReadableSparseVHDStream(
fileSize,
blockSize,
blocks.map(b => b.offsetBytes),
blocks
)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
])
const out1 = await readFile('out1.raw')
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})
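
The parser contract exercised by these tests is deliberately small: next() returns the following { offsetBytes, data } fragment in ascending offset order, or null once the input is exhausted. A minimal adapter over an in-memory list, mirroring mockParser above:

function makeArrayParser (fragments) {
  let i = 0
  return {
    next: () => (i < fragments.length ? fragments[i++] : null),
  }
}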

View File

@@ -1,18 +1,30 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import concurrency from 'limit-concurrency-decorator'
import fu from 'struct-fu'
import { dirname, relative } from 'path'
import getStream from 'get-stream'
import { fromEvent } from 'promise-toolbox'
import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
SECTOR_SIZE,
} from './_constants'
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//
@@ -24,160 +36,12 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
//
// ===================================================================
// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
export const VHD_SECTOR_SIZE = 512
// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4
const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0
// Types of backup treated. Others are not supported.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
export const PLATFORM_NONE = 0
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
// Other.
const BLOCK_UNUSED = 0xffffffff
const BIT_MASK = 0x80
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64('dataOffset'), // offset of the header, should always be 512
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.char('reserved', 426), // 86
])
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.uint8('dataOffsetUnused', 8),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
VHD_PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
// ===================================================================
// Helpers
// ===================================================================
const computeBatSize = entries =>
sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))
// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
// Check/Set a bit on a vhd map.
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
const mapSetBit = (map, bit) => {
map[bit >> 3] |= BIT_MASK >> (bit & 7)
}
const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// ===================================================================
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
@@ -187,6 +51,10 @@ const assertChecksum = (name, buf, struct) => {
}
}
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
// Format:
@@ -210,7 +78,8 @@ const assertChecksum = (name, buf, struct) => {
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export class Vhd {
export default class Vhd {
get batSize () {
return computeBatSize(this.header.maxTableEntries)
}
@@ -232,7 +101,12 @@ export class Vhd {
}
_read (start, n) {
return this._readStream(start, n).then(streamToBuffer)
return this._readStream(start, n)
.then(getStream.buffer)
.then(buf => {
assert.equal(buf.length, n)
return buf
})
}
containsBlock (id) {
@@ -243,15 +117,15 @@ export class Vhd {
getEndOfHeaders () {
const { header } = this
let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
let end = FOOTER_SIZE + HEADER_SIZE
// Max(end, block allocation table end)
end = Math.max(end, header.tableOffset + this.batSize)
for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
if (entry.platformCode !== PLATFORM_NONE) {
end = Math.max(
end,
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
@@ -266,7 +140,7 @@ export class Vhd {
// Returns the first sector after data.
getEndOfData () {
let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
@@ -283,25 +157,46 @@ export class Vhd {
return sectorsToBytes(end)
}
// Get the beginning (footer + header) of a vhd file.
async readHeaderAndFooter () {
const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
const bufHeader = buf.slice(VHD_FOOTER_SIZE)
// TODO: extract the checks into reusable functions:
// - better human reporting
// - auto repair if possible
async readHeaderAndFooter (checkSecondFooter = true) {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
const bufFooter = buf.slice(0, FOOTER_SIZE)
const bufHeader = buf.slice(FOOTER_SIZE)
assertChecksum('footer', bufFooter, fuFooter)
assertChecksum('header', bufHeader, fuHeader)
if (checkSecondFooter) {
const size = await this._handler.getSize(this._path)
assert(
bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
'footer1 !== footer2'
)
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
const header = (this.header = fuHeader.unpack(bufHeader))
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
header.blockSize / VHD_SECTOR_SIZE
))
const sectorsPerBlock = (this.sectorsPerBlock =
header.blockSize / SECTOR_SIZE)
// Compute bitmap size in sectors.
// Default: 1.
@@ -317,23 +212,18 @@ export class Vhd {
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
}
// Check if a vhd object has a block allocation table.
hasBlockAllocationTableMap () {
return this.footer.fileFormatVersion > getVhdVersion(1, 0)
}
// Returns a buffer that contains the block allocation table of a vhd file.
async readBlockTable () {
async readBlockAllocationTable () {
const { header } = this
this.blockTable = await this._read(
header.tableOffset,
header.maxTableEntries * VHD_ENTRY_SIZE
header.maxTableEntries * 4
)
}
// return the first sector (bitmap) of a block
_getBatEntry (block) {
return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
return this.blockTable.readUInt32BE(block * 4)
}
_readBlock (blockId, onlyBitmap = false) {
@@ -371,7 +261,7 @@ export class Vhd {
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += VHD_ENTRY_SIZE
j += 4
if (i === n) {
const error = new Error('no allocated block found')
@@ -395,7 +285,7 @@ export class Vhd {
}
i += 1
j += VHD_ENTRY_SIZE
j += 4
}
return { first, firstSector, last, lastSector }
@@ -431,7 +321,7 @@ export class Vhd {
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
)
if (
tableOffset + batSize + spaceNeededBytes >=
@@ -439,7 +329,7 @@ export class Vhd {
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / VHD_SECTOR_SIZE,
lastSector + fullBlockSize / SECTOR_SIZE,
newMinSector
)
debug(
@@ -478,7 +368,7 @@ export class Vhd {
const prevBat = this.blockTable
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
debug(
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
@@ -491,21 +381,18 @@ export class Vhd {
// set the first sector (bitmap) of a block
_setBatEntry (block, blockSector) {
const i = block * VHD_ENTRY_SIZE
const i = block * 4
const { blockTable } = this
blockTable.writeUInt32BE(blockSector, i)
return this._write(
blockTable.slice(i, i + VHD_ENTRY_SIZE),
this.header.tableOffset + i
)
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async createBlock (blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
@@ -647,7 +534,7 @@ export class Vhd {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
const offset = VHD_FOOTER_SIZE
const offset = FOOTER_SIZE
debug(
`Write header at: ${offset} (checksum=${
header.checksum
@@ -657,12 +544,12 @@ export class Vhd {
}
async writeData (offsetSectors, buffer) {
const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
const endBufferSectors = offsetSectors + bufferSizeSectors
const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
await this.ensureBatSize(lastBlock)
const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
@@ -681,11 +568,11 @@ export class Vhd {
)
const startInBuffer = Math.max(
0,
(currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
(currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
)
const endInBuffer = Math.min(
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
VHD_SECTOR_SIZE,
SECTOR_SIZE,
buffer.length
)
let inputBuffer
@@ -695,7 +582,7 @@ export class Vhd {
inputBuffer = Buffer.alloc(blockSizeBytes, 0)
buffer.copy(
inputBuffer,
offsetInBlockSectors * VHD_SECTOR_SIZE,
offsetInBlockSectors * SECTOR_SIZE,
startInBuffer,
endInBuffer
)
@@ -710,10 +597,10 @@ export class Vhd {
}
async ensureSpaceForParentLocators (neededSectors) {
const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
firstLocatorOffset / VHD_SECTOR_SIZE
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
firstLocatorOffset / SECTOR_SIZE
if (currentSpace < neededSectors) {
const deltaSectors = neededSectors - currentSpace
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
@@ -722,274 +609,23 @@ export class Vhd {
}
return firstLocatorOffset
}
}
// Merge vhd child into vhd parent.
//
// Child must be a delta backup!
// Parent must be a full backup!
//
// TODO: update the identifier of the parent VHD.
export default concurrency(2)(async function vhdMerge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
// Child must be a delta.
if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
throw new Error('Unable to merge, child is not a delta backup.')
}
// Allocation table map is not yet implemented.
if (
parentVhd.hasBlockAllocationTableMap() ||
childVhd.hasBlockAllocationTableMap()
) {
throw new Error('Unsupported allocation table map.')
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
async setUniqueParentLocator (fileNameString) {
const { header } = this
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace =
dataSpaceSectors * SECTOR_SIZE
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
// returns true if the child was actually modified
export async function chainVhd (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = HARD_DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(parentName, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
await childVhd._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
dataSpaceSectors
)
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
await childVhd.writeHeader()
await childVhd.writeFooter()
return true
}
export const createReadStream = asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockTable()
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// data of our synthetic VHD
// TODO: empty parentUuid and parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: 512 + 1024,
parentUnicodeName: '',
}
const bat = Buffer.allocUnsafe(
Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
)
let footer = {
...vhd.footer,
diskType: HARD_DISK_TYPE_DYNAMIC,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock =
sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
const isRootVhd = vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC
if (!vhd.containsBlock(iBlock)) {
if (isRootVhd) {
yield Buffer.alloc((n - i) * VHD_SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, i, n)
}
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (isRootVhd) {
yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlock)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createReadStream, closeFd', i, error)
})
}
}
})
export async function readVhdMetadata (handler: RemoteHandler, path: string) {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
}
}
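
For reference, the checksum used by checksumStruct and assertChecksum above is the one's complement of the byte sum, computed with the checksum field itself excluded. A standalone sketch (64 is the footer's checksum offset; the header's differs):

function vhdChecksum (buf, checksumOffset = 64, checksumSize = 4) {
  let sum = 0
  for (let i = 0; i < buf.length; ++i) {
    if (i < checksumOffset || i >= checksumOffset + checksumSize) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0 // one's complement, as an unsigned 32-bit integer
}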

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.5.1",
"version": "0.5.2",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.10.0",
"version": "0.11.0",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -35,6 +35,7 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.26.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -42,6 +43,7 @@
"devDependencies": {
"babel-cli": "^6.24.1",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
@@ -56,7 +58,8 @@
},
"babel": {
"plugins": [
"lodash"
"lodash",
"transform-runtime"
],
"presets": [
[

View File

@@ -1,6 +1,6 @@
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, startCase } from 'lodash'
import { find, forEach, get, startCase } from 'lodash'
import pkg from '../package'
@@ -41,9 +41,9 @@ const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormater = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
.tz(timezone)
.format(DATE_FORMAT)
moment(timestamp)
.tz(timezone)
.format(DATE_FORMAT)
: timestamp => moment(timestamp).format(DATE_FORMAT)
const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
@@ -66,6 +66,7 @@ const logError = e => {
console.error('backup report error:', e)
}
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
const UNHEALTHY_VDI_CHAIN_MESSAGE =
@@ -94,14 +95,351 @@ class BackupReportsXoPlugin {
this._xo.removeListener('job:terminated', this._report)
}
_wrapper (status) {
return new Promise(resolve => resolve(this._listener(status))).catch(
logError
)
_wrapper (status, job, schedule) {
return new Promise(resolve =>
resolve(
job.type === 'backup'
? this._backupNgListener(status, job, schedule)
: this._listener(status, job, schedule)
)
).catch(logError)
}
async _backupNgListener (runJobId, _, { timezone }) {
const xo = this._xo
const logs = await xo.getBackupNgLogs(runJobId)
const jobLog = logs['roots'][0]
const vmsTaskLog = logs[jobLog.id]
const { reportWhen, mode } = jobLog.data || {}
if (reportWhen === 'never') {
return
}
const formatDate = createDateFormater(timezone)
const jobName = (await xo.getJob(jobLog.jobId, 'backup')).name
if (jobLog.error !== undefined) {
const [globalStatus, icon] =
jobLog.error.message === NO_VMS_MATCH_THIS_PATTERN
? ['Skipped', ICON_SKIPPED]
: ['Failure', ICON_FAILURE]
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(jobLog.start)}`,
`- **End time**: ${formatDate(jobLog.end)}`,
`- **Duration**: ${formatDuration(jobLog.duration)}`,
`- **Error**: ${jobLog.error.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${jobName} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Backup report for ${jobName} - Error : ${
jobLog.error.message
}`,
})
}
const failedVmsText = []
const skippedVmsText = []
const successfulVmsText = []
const nagiosText = []
let globalMergeSize = 0
let globalTransferSize = 0
let nFailures = 0
let nSkipped = 0
for (const vmTaskLog of vmsTaskLog || []) {
const vmTaskStatus = vmTaskLog.status
if (vmTaskStatus === 'success' && reportWhen === 'failure') {
return
}
const vmId = vmTaskLog.data.id
let vm
try {
vm = xo.getObject(vmId)
} catch (e) {}
const text = [
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
`- **Start time**: ${formatDate(vmTaskLog.start)}`,
`- **End time**: ${formatDate(vmTaskLog.end)}`,
`- **Duration**: ${formatDuration(vmTaskLog.duration)}`,
]
const failedSubTasks = []
const operationsText = []
const srsText = []
const remotesText = []
for (const subTaskLog of logs[vmTaskLog.taskId] || []) {
const { data, status, result, message } = subTaskLog
const icon =
subTaskLog.status === 'success' ? ICON_SUCCESS : ICON_FAILURE
const errorMessage = ` **Error**: ${get(result, 'message')}`
if (message === 'snapshot') {
operationsText.push(`- **Snapshot** ${icon}`)
if (status === 'failure') {
failedSubTasks.push('Snapshot')
operationsText.push('', errorMessage)
}
} else if (data.type === 'remote') {
const remoteId = data.id
const remote = await xo.getRemote(remoteId).catch(() => {})
remotesText.push(
`- **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${remoteId}) ${icon}`
)
if (status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : remoteId)
remotesText.push('', errorMessage)
}
} else {
const srId = data.id
let sr
try {
sr = xo.getObject(srId)
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, srId]
srsText.push(`- **${srName}** (${srUuid}) ${icon}`)
if (status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : srId)
srsText.push('', errorMessage)
}
}
}
if (operationsText.length !== 0) {
operationsText.unshift(`#### Operations`, '')
}
if (srsText.length !== 0) {
srsText.unshift(`#### SRs`, '')
}
if (remotesText.length !== 0) {
remotesText.unshift(`#### remotes`, '')
}
const subText = [...operationsText, '', ...srsText, '', ...remotesText]
const result = vmTaskLog.result
if (vmTaskStatus === 'failure' && result !== undefined) {
const { message } = result
if (isSkippedError(result)) {
++nSkipped
skippedVmsText.push(
...text,
`- **Reason**: ${
message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: message
}`,
''
)
nagiosText.push(
`[(Skipped) ${
vm !== undefined ? vm.name_label : 'undefined'
} : ${message} ]`
)
} else {
++nFailures
failedVmsText.push(...text, `- **Error**: ${message}`, '')
nagiosText.push(
`[(Failed) ${
vm !== undefined ? vm.name_label : 'undefined'
} : ${message} ]`
)
}
} else {
let transferSize, transferDuration, mergeSize, mergeDuration
forEach(logs[vmTaskLog.taskId], ({ taskId }) => {
if (transferSize !== undefined) {
return false
}
const transferTask = find(logs[taskId], { message: 'transfer' })
if (transferTask !== undefined) {
transferSize = transferTask.result.size
transferDuration = transferTask.end - transferTask.start
}
const mergeTask = find(logs[taskId], { message: 'merge' })
if (mergeTask !== undefined) {
mergeSize = mergeTask.result.size
mergeDuration = mergeTask.end - mergeTask.start
}
})
if (transferSize !== undefined) {
globalTransferSize += transferSize
text.push(
`- **Transfer size**: ${formatSize(transferSize)}`,
`- **Transfer speed**: ${formatSpeed(
transferSize,
transferDuration
)}`
)
}
if (mergeSize !== undefined) {
globalMergeSize += mergeSize
text.push(
`- **Merge size**: ${formatSize(mergeSize)}`,
`- **Merge speed**: ${formatSpeed(mergeSize, mergeDuration)}`
)
}
if (vmTaskStatus === 'failure') {
++nFailures
failedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[(Failed) ${
vm !== undefined ? vm.name_label : 'undefined'
}: (failed)[${failedSubTasks.toString()}]]`
)
} else {
successfulVmsText.push(...text, '', '', ...subText, '')
}
}
}
const globalSuccess = nFailures === 0 && nSkipped === 0
if (reportWhen === 'failure' && globalSuccess) {
return
}
const nVms = vmsTaskLog.length
const nSuccesses = nVms - nFailures - nSkipped
const globalStatus = globalSuccess
? `Success`
: nFailures !== 0 ? `Failure` : `Skipped`
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(jobLog.start)}`,
`- **End time**: ${formatDate(jobLog.end)}`,
`- **Duration**: ${formatDuration(jobLog.duration)}`,
`- **Successes**: ${nSuccesses} / ${nVms}`,
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
'',
`## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
'',
...failedVmsText
)
}
if (nSkipped !== 0) {
markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
}
if (nSuccesses !== 0 && reportWhen !== 'failure') {
markdown.push(
'---',
'',
`## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
'',
...successfulVmsText
)
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${jobName} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
})
}
_sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: this._mailsReceivers,
subject,
markdown,
}),
xo.sendToXmppClient !== undefined &&
xo.sendToXmppClient({
to: this._xmppReceivers,
message: markdown,
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
nagiosStatus,
message: nagiosMarkdown,
}),
])
}
_listener (status) {
const { calls } = status
const { calls, timezone, error } = status
const formatDate = createDateFormater(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =
error.message === NO_VMS_MATCH_THIS_PATTERN
? ['Skipped', ICON_SKIPPED]
: ['Failure', ICON_FAILURE]
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **Start time**: ${formatDate(status.start)}`,
`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(status.end - status.start)}`,
`- **Error**: ${error.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
error.message
}`,
})
}
const callIds = Object.keys(calls)
const nCalls = callIds.length
@@ -139,8 +477,6 @@ class BackupReportsXoPlugin {
const skippedBackupsText = []
const successfulBackupText = []
const formatDate = createDateFormater(status.timezone)
forEach(calls, call => {
const { id = call.params.vm } = call.params
@@ -226,9 +562,8 @@ class BackupReportsXoPlugin {
return
}
const { end, start } = status
const { tag } = oneCall.params
const duration = end - start
const duration = status.end - status.start
const nSuccesses = nCalls - nFailures - nSkipped
const globalStatus = globalSuccess
? `Success`
@@ -238,8 +573,8 @@ class BackupReportsXoPlugin {
`## Global status: ${globalStatus}`,
'',
`- **Type**: ${formatMethod(method)}`,
`- **Start time**: ${formatDate(start)}`,
`- **End time**: ${formatDate(end)}`,
`- **Start time**: ${formatDate(status.start)}`,
`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(duration)}`,
`- **Successes**: ${nSuccesses} / ${nCalls}`,
]
@@ -285,37 +620,20 @@ class BackupReportsXoPlugin {
markdown = markdown.join('\n')
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: this._mailsReceivers,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
markdown,
}),
xo.sendToXmppClient !== undefined &&
xo.sendToXmppClient({
to: this._xmppReceivers,
message: markdown,
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: globalSuccess ? 0 : 2,
message: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
}),
])
return this._sendReport({
markdown,
subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
globalSuccess
? ICON_SUCCESS
: nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
}`,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
})
}
}
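
formatSize and formatSpeed are imported from elsewhere in the plugin; a plausible sketch using the human-format dependency declared above (durations are in milliseconds, since they come from end - start timestamps):

import humanFormat from 'human-format'

const formatSize = bytes => humanFormat(bytes, { scale: 'binary', unit: 'B' })
const formatSpeed = (bytes, milliseconds) =>
  humanFormat(bytes * 1e3 / milliseconds, { scale: 'binary', unit: 'B/s' })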

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-perf-alert",
"version": "0.0.0",
"version": "0.1.0",
"license": "AGPL-3.0",
"description": "",
"keywords": [],

View File

@@ -1,10 +1,11 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const VM_FUNCTIONS = {
cpuUsage: {
name: 'VM CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
},
},
memoryUsage: {
name: 'VM memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {
const HOST_FUNCTIONS = {
cpuUsage: {
name: 'host CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
},
},
memoryUsage: {
name: 'host memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
)
},
getDisplayableValue,
shouldAlarm: () => {
return getDisplayableValue() > threshold
},
shouldAlarm: () => getDisplayableValue() > threshold,
}
},
},
}
const SR_FUNCTIONS = {
storageUsage: {
name: 'SR storage usage',
description:
'Raises an alarm when the used disk space % is higher than the threshold',
unit: '% used',
comparator: '>',
createGetter: threshold => sr => {
const getDisplayableValue = () =>
sr.physical_utilisation * 100 / sr.physical_size
return {
getDisplayableValue,
shouldAlarm: () => getDisplayableValue() > threshold,
}
},
},
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
const TYPE_FUNCTION_MAP = {
vm: VM_FUNCTIONS,
host: HOST_FUNCTIONS,
sr: SR_FUNCTIONS,
}
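// Worked example of the createGetter pattern above: the configured threshold
// is captured in a closure and each call probes one object (the literal below
// is a stand-in for a real XAPI SR record).
const srProbe = SR_FUNCTIONS.storageUsage.createGetter(80)({
  physical_utilisation: 900e9,
  physical_size: 1e12,
})
srProbe.getDisplayableValue() // => 90 (% used)
srProbe.shouldAlarm() // => true, since 90 > 80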
// list of currently ringing alarms, to avoid double notification
@@ -229,11 +250,52 @@ export const configurationSchema = {
required: ['uuids'],
},
},
srMonitors: {
type: 'array',
title: 'SR Monitors',
description:
'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
'The average is compared to the threshold and an alarm is raised when it is crossed',
items: {
type: 'object',
properties: {
uuids: {
title: 'SRs',
type: 'array',
items: {
type: 'string',
$type: 'SR',
},
},
variableName: {
title: 'Alarm Type',
description: Object.keys(SR_FUNCTIONS)
.map(
k =>
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
SR_FUNCTIONS[k].description
}`
)
.join('\n'),
type: 'string',
default: Object.keys(SR_FUNCTIONS)[0],
enum: Object.keys(SR_FUNCTIONS),
},
alarmTriggerLevel: {
title: 'Threshold',
description:
'The direction of the crossing is given by the Alarm type',
type: 'number',
default: 80,
},
},
required: ['uuids'],
},
},
toEmails: {
type: 'array',
title: 'Email addresses',
description: 'Email addresses of the alert recipients',
items: {
type: 'string',
},
@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
currentAlarms[alarmId] = true
raiseCallback(alarmId)
}
} else {
if (current) {
try {
lowerCallback(alarmId)
} finally {
delete currentAlarms[alarmId]
}
} else if (current) {
try {
lowerCallback(alarmId)
} finally {
delete currentAlarms[alarmId]
}
}
}
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
clearCurrentAlarms()
}
load () {
this._job.start()
}
unload () {
this._job.stop()
}
_generateUrl (type, object) {
const map = {
vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
const { baseUrl } = this._configuration
const { uuid } = object
switch (type) {
case 'vm':
return `${baseUrl}#/vms/${uuid}/stats`
case 'host':
return `${baseUrl}#/hosts/${uuid}/stats`
case 'sr':
return `${baseUrl}#/srs/${uuid}/general`
default:
return 'unknown type'
}
return map[type]()
}
async test () {
const hostMonitorPart2 = await Promise.all(
map(this._getMonitors(), async m => {
const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
return `
const monitorBodies = await Promise.all(
map(
this._getMonitors(),
async m => `
## Monitor for ${m.title}
${m.tableHeader}
${tableBody.join('')}`
})
${(await m.snapshot()).map(entry => entry.listItem).join('')}`
)
)
this._sendAlertEmail(
@@ -322,18 +396,10 @@ ${tableBody.join('')}`
`
# Performance Alert Test
Your alarms and their current status:
${hostMonitorPart2.join('\n')}`
${monitorBodies.join('\n')}`
)
}
load () {
this._job.start()
}
unload () {
this._job.stop()
}
_parseDefinition (definition) {
const alarmId = `${definition.objectType}|${definition.variableName}|${
definition.alarmTriggerLevel
@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
definition.alarmTriggerPeriod !== undefined
? definition.alarmTriggerPeriod
: 60
const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
return {
...definition,
alarmId,
vmFunction: typeFunction,
title: `${typeText} ${definition.variableName} ${
typeFunction.comparator
} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
title: `${typeFunction.name} ${typeFunction.comparator} ${
definition.alarmTriggerLevel
}${typeFunction.unit}`,
snapshot: async () => {
return Promise.all(
map(definition.uuids, async uuid => {
try {
const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
const objectLink = `[${
monitoredObject.name_label
}](${this._generateUrl(definition.objectType, monitoredObject)})`
const rrd = await this.getRrd(monitoredObject, observationPeriod)
const couldFindRRD = rrd !== null
const result = {
object: monitoredObject,
couldFindRRD,
objectLink: objectLink,
listItem: ` * ${typeText} ${objectLink} ${
definition.variableName
}: **Can't read performance counters**\n`,
tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
uuid,
name: definition.name,
object: this._xo.getXapi(uuid).getObject(uuid),
}
if (!couldFindRRD) {
return result
if (result.object === undefined) {
throw new Error('object not found')
}
const data = parseData(rrd, monitoredObject.uuid)
const textValue =
data.getDisplayableValue().toFixed(1) + typeFunction.unit
const shouldAlarm = data.shouldAlarm()
return {
...result,
value: data.getDisplayableValue(),
shouldAlarm: shouldAlarm,
textValue: textValue,
listItem: ` * ${typeText} ${objectLink} ${
definition.variableName
}: ${textValue}\n`,
tableItem: `${objectLink} | ${textValue} | ${
shouldAlarm ? '**Alert Ongoing**' : 'no alert'
}\n`,
result.objectLink = `[${
result.object.name_label
}](${this._generateUrl(definition.objectType, result.object)})`
if (typeFunction.createGetter === undefined) {
// Stats via RRD
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
}
} else {
// Stats via XAPI
const getter = typeFunction.createGetter(
definition.alarmTriggerLevel
)
const data = getter(result.object)
assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
}
result.listItem = ` * ${result.objectLink}: ${
result.value === undefined
? "**Can't read performance counters**"
: result.value.toFixed(1) + typeFunction.unit
}\n`
return result
} catch (_) {
return {
uuid,
object: null,
couldFindRRD: false,
objectLink: `cannot find object ${uuid}`,
listItem: ` * ${typeText} ${uuid} ${
definition.variableName
}: **Can't read performance counters**\n`,
tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
listItem: ` * ${uuid}: **Can't read performance counters**\n`,
}
}
})
@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
_getMonitors () {
return map(this._configuration.hostMonitors, def =>
this._parseDefinition({ ...def, objectType: 'host' })
).concat(
map(this._configuration.vmMonitors, def =>
this._parseDefinition({ ...def, objectType: 'vm' })
)
)
.concat(
map(this._configuration.vmMonitors, def =>
this._parseDefinition({ ...def, objectType: 'vm' })
)
)
.concat(
map(this._configuration.srMonitors, def =>
this._parseDefinition({ ...def, objectType: 'sr' })
)
)
}
async _checkMonitors () {
@@ -466,7 +542,7 @@ ${hostMonitorPart2.join('\n')}`
for (const entry of snapshot) {
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}|RRD`,
!entry.couldFindRRD,
entry.value === undefined,
() => {
this._sendAlertEmail(
'Secondary Issue',
@@ -477,9 +553,11 @@ ${entry.listItem}`
},
() => {}
)
if (!entry.couldFindRRD) {
if (entry.value === undefined) {
continue
}
const raiseAlarm = alarmId => {
// sample XenCenter message:
// value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>
@@ -500,23 +578,24 @@ ${entry.listItem}`
this._sendAlertEmail(
'',
`
## ALERT ${monitor.title}
## ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
const lowerAlarm = alarmId => {
console.log('lowering Alarm', alarmId)
this._sendAlertEmail(
'END OF ALERT',
`
## END OF ALERT ${monitor.title}
## END OF ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}`,
entry.shouldAlarm,

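A minimal sketch of the edge-triggered alarm pattern the hunks above rely on (assumed shape; the real plugin also formats emails and keeps more per-alarm state): an alarm callback fires once on the rising edge of `shouldAlarm` and once on the falling edge, with `currentAlarms` tracking which alarm IDs are active.

const currentAlarms = {}
function raiseOrLowerAlarm (alarmId, shouldAlarm, raiseCallback, lowerCallback) {
  const current = currentAlarms[alarmId]
  if (shouldAlarm) {
    if (!current) {
      currentAlarms[alarmId] = true
      raiseCallback(alarmId) // fires only when the alarm becomes active
    }
  } else if (current) {
    try {
      lowerCallback(alarmId) // fires only when the alarm clears
    } finally {
      delete currentAlarms[alarmId]
    }
  }
}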

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.4.0",
"version": "0.4.2",
"license": "AGPL-3.0",
"description": "",
"keywords": [


@@ -159,30 +159,30 @@
</tr>
<tr>
<td>CPU:</td>
<td>{{global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<td>{{normaliseValue global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<tr>
<tr>
<td>RAM:</td>
<td>{{global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<td>{{normaliseValue global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<tr>
<tr>
<td>Disk read:</td>
<td>{{global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
<td>{{normaliseValue global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
</td>
<tr>
<tr>
<td>Disk write:</td>
<td>{{global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
<td>{{normaliseValue global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
</td>
<tr>
<tr>
<td>Network RX:</td>
<td>{{global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
<td>{{normaliseValue global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
<td>{{global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
<td>{{normaliseValue global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -205,7 +205,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} %</td>
<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
@@ -216,7 +216,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -226,7 +226,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} MiB</td>
<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -236,7 +236,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} MiB</td>
<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -246,7 +246,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -256,7 +256,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -275,28 +275,28 @@
</tr>
<tr>
<td>CPU:</td>
<td>{{global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
<td>{{normaliseValue global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
</td>
<tr>
<tr>
<td>RAM:</td>
<td>{{global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
<td>{{normaliseValue global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
</td>
</td>
<tr>
<tr>
<td>Load average:</td>
<td>{{global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
<td>{{normaliseValue global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
</td>
<tr>
<tr>
<td>Network RX:</td>
<td>{{global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
<td>{{normaliseValue global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
<td>{{global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
<td>{{normaliseValue global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -318,7 +318,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} %</td>
<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
<tr>
@@ -328,7 +328,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -338,7 +338,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} </td>
<td>{{normaliseValue this.value}} </td>
</tr>
{{/each}}
<tr>
@@ -348,7 +348,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -358,7 +358,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -378,7 +378,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
</table>


@@ -8,6 +8,7 @@ import {
filter,
find,
forEach,
get,
isFinite,
map,
orderBy,
@@ -137,7 +138,7 @@ Handlebars.registerHelper(
value =>
new Handlebars.SafeString(
isFinite(+value) && +value !== 0
? value > 0
? (value = round(value, 2)) > 0
? `(<b style="color: green;">▲ ${value}</b>)`
: `(<b style="color: red;">▼ ${String(value).slice(1)}</b>)`
: ''
@@ -164,7 +165,7 @@ const computeDoubleMean = val => computeMean(map(val, computeMean))
function computeMeans (objects, options) {
return zipObject(
options,
map(options, opt => round(computeMean(map(objects, opt)), 2))
map(options, opt => computeMean(map(objects, opt)), 2)
)
}
@@ -185,7 +186,7 @@ function getTop (objects, options) {
obj => ({
uuid: obj.uuid,
name: obj.name,
value: round(obj[opt], 2),
value: obj[opt],
})
)
)
@@ -200,7 +201,7 @@ function computePercentage (curr, prev, options) {
opt =>
prev[opt] === 0 || prev[opt] === null
? 'NONE'
: `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
: `${(curr[opt] - prev[opt]) * 100 / prev[opt]}`
)
)
}
@@ -228,10 +229,14 @@ async function getVmsStats ({ runningVms, xo }) {
name: vm.name_label,
cpu: computeDoubleMean(vmStats.stats.cpus),
ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
diskRead:
computeDoubleMean(values(get(vmStats.stats.xvds, 'r'))) / mibPower,
diskWrite:
computeDoubleMean(values(get(vmStats.stats.xvds, 'w'))) / mibPower,
netReception:
computeDoubleMean(get(vmStats.stats.vifs, 'rx')) / kibPower,
netTransmission:
computeDoubleMean(get(vmStats.stats.vifs, 'tx')) / kibPower,
}
})
),
@@ -251,9 +256,10 @@ async function getHostsStats ({ runningHosts, xo }) {
cpu: computeDoubleMean(hostStats.stats.cpus),
ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
load: computeMean(hostStats.stats.load),
netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
netReception:
computeDoubleMean(get(hostStats.stats.pifs, 'rx')) / kibPower,
netTransmission:
computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
computeDoubleMean(get(hostStats.stats.pifs, 'tx')) / kibPower,
}
})
),

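The template changes above replace every raw `{{this.value}}` with `{{normaliseValue this.value}}`, while the `round()` calls are dropped from `computeMeans`, `getTop` and `computePercentage`: rounding now happens at render time instead of in the data layer. A minimal sketch of such a helper, assuming lodash's `round` (the actual registration lives elsewhere in this package):

import Handlebars from 'handlebars'
import { isFinite, round } from 'lodash'

Handlebars.registerHelper('normaliseValue', value =>
  isFinite(+value) ? round(+value, 2) : '-'
)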

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "5.18.3",
"version": "5.19.7",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -34,6 +34,7 @@
"@babel/polyfill": "7.0.0-beta.44",
"@marsaud/smb2-promise": "^0.2.1",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/fs": "^0.0.0",
"ajv": "^6.1.1",
"app-conf": "^0.5.0",
"archiver": "^2.1.0",
@@ -59,6 +60,7 @@
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.6.2",
"helmet": "^3.9.0",
@@ -109,6 +111,7 @@
"tmp": "^0.0.33",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.1.0",
"ws": "^5.0.0",
"xen-api": "^0.16.9",
"xml2js": "^0.4.19",
@@ -116,7 +119,8 @@
"xo-collection": "^0.4.1",
"xo-common": "^0.1.1",
"xo-remote-parser": "^0.3",
"xo-vmdk-to-vhd": "0.0.12"
"xo-vmdk-to-vhd": "^0.1.1",
"yazl": "^2.4.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",


@@ -1,3 +1,7 @@
import { basename } from 'path'
import { safeDateFormat } from '../utils'
export function createJob ({ schedules, ...job }) {
job.userId = this.user.id
return this.createBackupNgJob(job, schedules)
@@ -130,6 +134,14 @@ runJob.params = {
// -----------------------------------------------------------------------------
export function getAllLogs () {
return this.getBackupNgLogs()
}
getAllLogs.permission = 'admin'
// -----------------------------------------------------------------------------
export function deleteVmBackup ({ id }) {
return this.deleteVmBackupNg(id)
}
@@ -171,3 +183,88 @@ importVmBackup.params = {
type: 'string',
},
}
// -----------------------------------------------------------------------------
export function listPartitions ({ remote, disk }) {
return this.listBackupNgDiskPartitions(remote, disk)
}
listPartitions.permission = 'admin'
listPartitions.params = {
disk: {
type: 'string',
},
remote: {
type: 'string',
},
}
export function listFiles ({ remote, disk, partition, path }) {
return this.listBackupNgPartitionFiles(remote, disk, partition, path)
}
listFiles.permission = 'admin'
listFiles.params = {
disk: {
type: 'string',
},
partition: {
type: 'string',
optional: true,
},
path: {
type: 'string',
},
remote: {
type: 'string',
},
}
async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
const zipStream = await this.fetchBackupNgPartitionFiles(
remote,
disk,
partition,
paths
)
res.setHeader('content-disposition', 'attachment')
res.setHeader('content-type', 'application/octet-stream')
return zipStream
}
export async function fetchFiles (params) {
const { paths } = params
let filename = `restore_${safeDateFormat(new Date())}`
if (paths.length === 1) {
filename += `_${basename(paths[0])}`
}
filename += '.zip'
return this.registerHttpRequest(handleFetchFiles, params, {
suffix: encodeURI(`/${filename}`),
}).then(url => ({ $getFrom: url }))
}
fetchFiles.permission = 'admin'
fetchFiles.params = {
disk: {
type: 'string',
},
partition: {
optional: true,
type: 'string',
},
paths: {
items: { type: 'string' },
minLength: 1,
type: 'array',
},
remote: {
type: 'string',
},
}

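A hedged client-side sketch of the new restore flow (the `xo.call` JSON-RPC transport, `fetch` and the identifier variables are assumptions, not part of this diff):

const { $getFrom } = await xo.call('backupNg.fetchFiles', {
  remote: remoteId,
  disk: diskId,
  partition: partitionId, // optional, see the schema above
  paths: ['/etc/fstab'],
})
// $getFrom is a relative URL registered via registerHttpRequest; fetching it
// streams back a ZIP named restore_<date>[_<basename>].zip
const response = await fetch(new URL($getFrom, serverUrl))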

@@ -1,19 +1,5 @@
export async function get ({ namespace }) {
const logger = await this.getLogger(namespace)
return new Promise((resolve, reject) => {
const logs = {}
logger
.createReadStream()
.on('data', data => {
logs[data.key] = data.value
})
.on('end', () => {
resolve(logs)
})
.on('error', reject)
})
export function get ({ namespace }) {
return this.getLogs(namespace)
}
get.description = 'returns logs list for one namespace'


@@ -204,8 +204,8 @@ export async function createNfs ({
}
// if NFS options given
if (nfsVersion) {
deviceConfig.options = nfsVersion
if (nfsOptions) {
deviceConfig.options = nfsOptions
}
const srRef = await xapi.call(


@@ -12,6 +12,10 @@ import { forEach, map, mapFilter, parseSize } from '../utils'
// ===================================================================
export function getHaValues () {
return ['best-effort', 'restart', '']
}
function checkPermissionOnSrs (vm, permission = 'operate') {
const permissions = []
forEach(vm.$VBDs, vbdId => {
@@ -151,10 +155,10 @@ export async function create (params) {
await Promise.all([
params.share
? Promise.all(
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
this.addAcl(subjectId, vm.id, 'admin')
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
this.addAcl(subjectId, vm.id, 'admin')
)
)
)
: this.addAcl(user.id, vm.id, 'admin'),
xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet),
])
@@ -556,11 +560,11 @@ set.params = {
name_description: { type: 'string', optional: true },
// TODO: provides better filtering of values for HA possible values: "best-
// effort" meaning "try to restart this VM if possible but don't consider the
// Pool to be overcommitted if this is not possible"; "restart" meaning "this
// VM should be restarted"; "" meaning "do not try to restart this VM"
high_availability: { type: 'boolean', optional: true },
high_availability: {
optional: true,
pattern: new RegExp(`^(${getHaValues().join('|')})$`),
type: 'string',
},
// Number of virtual CPUs to allocate.
CPUs: { type: 'integer', optional: true },
@@ -597,6 +601,9 @@ set.params = {
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
// Emulate HVM C000 PCI device for Windows Update to fetch or update PV drivers
hasVendorDevice: { type: 'boolean', optional: true },
// Move the VM into or out of Self Service
resourceSet: { type: ['string', 'null'], optional: true },

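With `getHaValues()` returning `['best-effort', 'restart', '']`, the resulting validation pattern is `^(best-effort|restart|)$`; the empty alternative lets clients clear HA. A quick sketch:

const haPattern = new RegExp(`^(${getHaValues().join('|')})$`)
haPattern.test('restart') // → true
haPattern.test('') // → true (disables HA)
haPattern.test('true') // → false: booleans are no longer accepted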

@@ -1,7 +1,6 @@
import getStream from 'get-stream'
import { forEach } from 'lodash'
import { streamToBuffer } from '../utils'
// ===================================================================
export function clean () {
@@ -61,7 +60,7 @@ getAllObjects.params = {
export async function importConfig () {
return {
$sendTo: await this.registerHttpRequest(async (req, res) => {
await this.importConfig(JSON.parse(await streamToBuffer(req)))
await this.importConfig(JSON.parse(await getStream.buffer(req)))
res.end('config successfully imported')
}),

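The hand-rolled `streamToBuffer` util (removed further down) is superseded by the get-stream package; its `buffer()` method resolves with the whole stream contents:

import getStream from 'get-stream'

// minimal equivalent of the old helper, for any readable stream
const body = await getStream.buffer(readableStream)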

@@ -7,6 +7,7 @@ import has from 'lodash/has'
import helmet from 'helmet'
import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
@@ -227,12 +228,12 @@ async function registerPlugin (pluginPath, pluginName) {
// instance.
const instance = isFunction(factory)
? factory({
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
: factory
await this.registerPlugin(
@@ -311,6 +312,13 @@ async function makeWebServerListen (
) {
if (cert && key) {
;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
if (opts.key.includes('ENCRYPTED')) {
opts.passphrase = await new Promise(resolve => {
console.log('Encrypted key %s', key)
process.stdout.write(`Enter pass phrase: `)
pw(resolve)
})
}
}
try {
const niceAddress = await webServer.listen(opts)


@@ -1,16 +1,15 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
import { isArray, map } from 'lodash'
// ===================================================================
const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => (fields, ...args) =>
execa
.stdout(command, [
const makeFunction = command => async (fields, ...args) => {
return splitLines(
await execa.stdout(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
String(fields),
...args,
])
.then(stdout =>
map(
splitLines(stdout),
isArray(fields)
? parse
: line => {
const data = parse(line)
return data[fields]
}
)
)
).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')

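Usage sketch for the refactored wrappers (the volume-group name is an assumption; keys follow the `--nameprefixes` output with the `LVM2_` prefix stripped and lower-cased, per the parser above):

const rows = await lvs(['lv_name', 'lv_size'], 'vg0')
// → [{ lv_name: 'data', lv_size: '10737418240' }, …]
const names = await lvs('lv_name', 'vg0')
// → ['data', …] — a single field name yields bare values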

@@ -16,6 +16,11 @@ export default {
key: {
type: 'string',
},
type: {
default: 'call',
enum: ['backup', 'call'],
},
data: {},
},
required: ['event', 'userId', 'jobId', 'key'],
required: ['event', 'userId', 'jobId'],
}


@@ -0,0 +1,18 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.end'],
},
taskId: {
type: 'string',
description: 'identifier of this task',
},
status: {
enum: ['canceled', 'failure', 'success'],
},
result: {},
},
required: ['event', 'taskId', 'status'],
}


@@ -0,0 +1,15 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.start'],
},
parentId: {
type: 'string',
description: 'identifier of the parent task or job',
},
data: {},
},
required: ['event'],
}

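Hedged examples of records matching the two new schemas (all values invented for illustration; `logger.notice` is the same logger API used by the job runner below):

logger.notice('task has started', {
  event: 'task.start',
  parentId: runJobId, // identifier of the parent task or job
  data: { type: 'VM', id: vmUuid },
})
logger.notice('task has ended', {
  event: 'task.end',
  taskId, // identifier of this task
  status: 'success', // or 'canceled' / 'failure'
  result: { size: 1024 },
})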

@@ -1,28 +0,0 @@
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
export { streamToNewBuffer as default }


@@ -55,10 +55,6 @@ export const asyncMap = (collection, iteratee) => {
// -------------------------------------------------------------------
export streamToBuffer from './stream-to-new-buffer'
// -------------------------------------------------------------------
export function camelToSnakeCase (string) {
return string.replace(
/([a-z0-9])([A-Z])/g,


@@ -1,72 +0,0 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'
async function testVhdMerge () {
console.log('before merge')
const moOfRandom = 4
await execa('bash', [
'-c',
`head -c ${moOfRandom}M < /dev/urandom >randomfile`,
])
await execa('bash', [
'-c',
`head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
])
await execa('qemu-img', [
'convert',
'-f',
'raw',
'-Ovpc',
'randomfile',
'randomfile.vhd',
])
await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
// await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
const childVhd = new Vhd(handler, 'randomfile.vhd')
console.log('changing type')
await childVhd.readHeaderAndFooter()
console.log('child vhd', childVhd.footer.currentSize, originalSize)
await childVhd.readBlockTable()
childVhd.footer.diskType = 4 // Delta backup.
await childVhd.writeFooter()
console.log('chained')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
console.log('merged')
const parentVhd = new Vhd(handler, 'empty.vhd')
await parentVhd.readHeaderAndFooter()
console.log('parent vhd', parentVhd.footer.currentSize)
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-Oraw',
'empty.vhd',
'recovered',
])
await execa('truncate', ['-s', originalSize, 'recovered'])
console.log('ls', (await execa('ls', ['-lt'])).stdout)
console.log(
'diff',
(await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
)
/* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
await vhd.ensureBatSize(300)
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
*/
console.log(await handler.list())
console.log('lol')
}
export { testVhdMerge as default }


@@ -292,8 +292,7 @@ const TRANSFORMS = {
}
})(),
// TODO: there are two possible values: "best-effort" and "restart"
high_availability: Boolean(obj.ha_restart_priority),
high_availability: obj.ha_restart_priority,
memory: (function () {
const dynamicMin = +obj.memory_dynamic_min
@@ -327,6 +326,7 @@ const TRANSFORMS = {
other: otherConfig,
os_version: (guestMetrics && guestMetrics.os_version) || null,
power_state: obj.power_state,
hasVendorDevice: obj.has_vendor_device,
resourceSet,
snapshots: link(obj, 'snapshots'),
startTime: metrics && toTimestamp(metrics.start_time),


@@ -994,11 +994,29 @@ export default class Xapi extends XapiBase {
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
// 3. Create VDIs.
const newVdis = await map(delta.vdis, async vdi => {
// 3. Create VDIs & VBDs.
const vbds = groupBy(delta.vbds, 'VDI')
const newVdis = await map(delta.vdis, async (vdi, vdiId) => {
let newVdi
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (!remoteBaseVdiUuid) {
const newVdi = await this.createVdi({
if (remoteBaseVdiUuid) {
const baseVdi = find(
baseVdis,
vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => this._deleteVdi(newVdi))
await this._updateObjectMapProperty(newVdi, 'other_config', {
[TAG_COPY_SRC]: vdi.uuid,
})
} else {
newVdi = await this.createVdi({
...vdi,
other_config: {
...vdi.other_config,
@@ -1008,24 +1026,15 @@ export default class Xapi extends XapiBase {
sr: mapVdisSrs[vdi.uuid] || srId,
})
$defer.onFailure(() => this._deleteVdi(newVdi))
return newVdi
}
const baseVdi = find(
baseVdis,
vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
await asyncMap(vbds[vdiId], vbd =>
this.createVbd({
...vbd,
vdi: newVdi,
vm,
})
)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
const newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => this._deleteVdi(newVdi))
await this._updateObjectMapProperty(newVdi, 'other_config', {
[TAG_COPY_SRC]: vdi.uuid,
})
return newVdi
})::pAll()
@@ -1051,15 +1060,6 @@ export default class Xapi extends XapiBase {
let transferSize = 0
await Promise.all([
// Create VBDs.
asyncMap(delta.vbds, vbd =>
this.createVbd({
...vbd,
vdi: newVdis[vbd.VDI],
vm,
})
),
// Import VDI contents.
asyncMap(newVdis, async (vdi, id) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
@@ -1314,7 +1314,7 @@ export default class Xapi extends XapiBase {
async _importOvaVm (
$defer,
stream,
{ descriptionLabel, disks, memory, nameLabel, networks, nCpus },
{ descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables },
sr
) {
// 1. Create VM.
@@ -1387,8 +1387,9 @@ export default class Xapi extends XapiBase {
return
}
const vhdStream = await vmdkToVhd(stream)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_RAW)
const table = tables[entry.name]
const vhdStream = await vmdkToVhd(stream, table)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
// See: https://github.com/mafintosh/tar-stream#extracting
// No import parallelization.

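The reordering above hinges on lodash's `groupBy`: VBDs are indexed by the VDI they reference, so each one can be recreated as soon as its VDI exists instead of in a separate pass at the end. Minimal sketch:

import { groupBy } from 'lodash'

const vbdsByVdi = groupBy(delta.vbds, 'VDI')
// vbdsByVdi[vdiId] → every VBD record to attach to the VDI created for vdiId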

@@ -35,8 +35,17 @@ declare class XapiObject {
}
type Id = string | XapiObject
declare export class Vbd extends XapiObject {
type: string;
VDI: string;
}
declare export class Vm extends XapiObject {
$snapshots: Vm[];
$VBDs: Vbd[];
is_a_snapshot: boolean;
is_a_template: boolean;
name_label: string;
other_config: $Dict<string>;
snapshot_time: number;
@@ -56,7 +65,7 @@ declare export class Xapi {
_updateObjectMapProperty(
object: XapiObject,
property: string,
entries: $Dict<string>
entries: $Dict<null | string>
): Promise<void>;
_setObjectProperties(
object: XapiObject,


@@ -1,9 +1,9 @@
import deferrable from 'golike-defer'
import every from 'lodash/every'
import find from 'lodash/find'
import filter from 'lodash/filter'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
@@ -445,10 +445,10 @@ export default {
const installableByUuid =
host.license_params.sku_type !== 'free'
? await this._listMissingPoolPatchesOnHost(host)
: filter(await this._listMissingPoolPatchesOnHost(host), {
paid: false,
upgrade: false,
})
: pickBy(await this._listMissingPoolPatchesOnHost(host), {
paid: false,
upgrade: false,
})
// List of all installable patches sorted from the newest to the
// oldest.
@@ -488,7 +488,7 @@ export default {
patches =>
host.license_params.sku_type !== 'free'
? patches
: filter(patches, { paid: false, upgrade: false })
: pickBy(patches, { paid: false, upgrade: false })
)
}
})


@@ -310,11 +310,7 @@ export default {
highAvailability: {
set (ha, vm) {
return this.call(
'VM.set_ha_restart_priority',
vm.$ref,
ha ? 'restart' : ''
)
return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
},
},
@@ -384,6 +380,8 @@ export default {
tags: true,
hasVendorDevice: true,
vga: {
set (vga, vm) {
if (!includes(XEN_VGA_VALUES, vga)) {

File diff suppressed because it is too large


@@ -12,7 +12,6 @@ import {
endsWith,
filter,
find,
findIndex,
includes,
once,
range,
@@ -20,9 +19,13 @@ import {
startsWith,
trim,
} from 'lodash'
import {
chainVhd,
createSyntheticStream as createVhdReadStream,
mergeVhd,
} from 'vhd-lib'
import createSizeStream from '../size-stream'
import vhdMerge, { chainVhd } from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import { lvs, pvs } from '../lvm'
import {
@@ -551,7 +554,7 @@ export default class {
const backup = `${dir}/${backups[j]}`
try {
mergedDataSize += await vhdMerge(handler, parent, handler, backup)
mergedDataSize += await mergeVhd(handler, parent, handler, backup)
} catch (e) {
console.error('Unable to use vhd-util.', e)
throw e
@@ -566,33 +569,6 @@ export default class {
return mergedDataSize
}
async _listDeltaVdiDependencies (handler, filePath) {
const dir = dirname(filePath)
const filename = basename(filePath)
const backups = await this._listVdiBackups(handler, dir)
// Search file. (delta or full backup)
const i = findIndex(
backups,
backup => getVdiTimestamp(backup) === getVdiTimestamp(filename)
)
if (i === -1) {
throw new Error('VDI to import not found in this remote.')
}
// Search full backup.
let j
for (j = i; j >= 0 && isDeltaVdiBackup(backups[j]); j--);
if (j === -1) {
throw new Error(`Unable to found full vdi backup of: ${filePath}`)
}
return backups.slice(j, i + 1)
}
// -----------------------------------------------------------------
async _listDeltaVmBackups (handler, dir) {
@@ -840,17 +816,17 @@ export default class {
await Promise.all(
mapToArray(delta.vdis, async (vdi, id) => {
const vdisFolder = `${basePath}/${dirname(vdi.xoPath)}`
const backups = await this._listDeltaVdiDependencies(
handler,
`${basePath}/${vdi.xoPath}`
)
let path = `${basePath}/${vdi.xoPath}`
try {
await handler.getSize(path)
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
streams[`${id}.vhd`] = await Promise.all(
mapToArray(backups, async backup =>
handler.createReadStream(`${vdisFolder}/${backup}`)
)
)
path = path.replace(/_delta\.vhd$/, '_full.vhd')
}
streams[`${id}.vhd`] = await createVhdReadStream(handler, path)
})
)
@@ -1038,13 +1014,13 @@ export default class {
// VHD path may need to be fixed.
return endsWith(vhdPath, '_delta.vhd')
? pFromCallback(cb => stat(vhdPath, cb)).then(
() => vhdPath,
error => {
if (error && error.code === 'ENOENT') {
return `${vhdPath.slice(0, -10)}_full.vhd`
() => vhdPath,
error => {
if (error && error.code === 'ENOENT') {
return `${vhdPath.slice(0, -10)}_full.vhd`
}
}
}
)
)
: vhdPath
})
.then(vhdPath => execa('vhdimount', [vhdPath, mountDir]))

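Sketch of the new read path (per the vhd-lib import at the top of this file): instead of listing every delta in a chain and opening one stream per backup file, `createSyntheticStream` walks the VHD parent chain on the remote and exposes the merged disk as a single stream.

import { createSyntheticStream } from 'vhd-lib'

// path points at a _delta.vhd, or at the _full.vhd fallback computed above
const stream = await createSyntheticStream(handler, path)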

@@ -0,0 +1,332 @@
import defer from 'golike-defer'
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser as createPairsParser } from 'parse-pairs'
import { normalize } from 'path'
import { readdir, rmdir, stat } from 'fs-extra'
import { ZipFile } from 'yazl'
import { lvs, pvs } from '../lvm'
import { resolveSubpath, tmpDir } from '../utils'
const IGNORED_PARTITION_TYPES = {
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
0x05: true,
0x0f: true,
0x15: true,
0x5e: true,
0x5f: true,
0x85: true,
0x91: true,
0x9b: true,
0xc5: true,
0xcf: true,
0xd5: true,
0x82: true, // swap
}
const PARTITION_TYPE_NAMES = {
0x07: 'NTFS',
0x0c: 'FAT',
0x83: 'linux',
}
const RE_VHDI = /^vhdi(\d+)$/
const parsePartxLine = createPairsParser({
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
valueTransform: (value, key) =>
key === 'start' || key === 'size'
? +value
: key === 'type' ? PARTITION_TYPE_NAMES[+value] || value : value,
})
const listLvmLogicalVolumes = defer(
async ($defer, devicePath, partition, results = []) => {
const pv = await mountLvmPhysicalVolume(devicePath, partition)
$defer(pv.unmount)
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], pv.path)
const partitionId = partition !== undefined ? partition.id : ''
lvs.forEach((lv, i) => {
const name = lv.lv_name
if (name !== '') {
results.push({
id: `${partitionId}/${lv.vg_name}/${name}`,
name,
size: lv.lv_size,
})
}
})
return results
}
)
async function mountLvmPhysicalVolume (devicePath, partition) {
const args = []
if (partition !== undefined) {
args.push('-o', partition.start * 512)
}
args.push('--show', '-f', devicePath)
const path = (await execa.stdout('losetup', args)).trim()
await execa('pvscan', ['--cache', path])
return {
path,
unmount: async () => {
try {
const vgNames = await pvs('vg_name', path)
await execa('vgchange', ['-an', ...vgNames])
} finally {
await execa('losetup', ['-d', path])
}
},
}
}
const mountPartition = defer(async ($defer, devicePath, partition) => {
const options = ['loop', 'ro']
if (partition !== undefined) {
const { start } = partition
if (start !== undefined) {
options.push(`offset=${start * 512}`)
}
}
const path = await tmpDir()
$defer.onFailure(rmdir, path)
const mount = options =>
execa('mount', [
`--options=${options.join(',')}`,
`--source=${devicePath}`,
`--target=${path}`,
])
// the `norecovery` option is used for ext3/ext4/xfs; if mounting fails it
// might be another fs, so try again without it
try {
await mount([...options, 'norecovery'])
} catch (error) {
await mount(options)
}
const unmount = async () => {
await execa('umount', ['--lazy', path])
return rmdir(path)
}
$defer.onFailure(unmount)
return { path, unmount }
})
// - [x] list partitions
// - [x] list files in a partition
// - [x] list files in a bare partition
// - [x] list LVM partitions
//
// - [ ] partitions with unmount debounce
// - [ ] handle directory restore
// - [ ] handle multiple entries restore (both dirs and files)
// - [ ] by default use common path as root
// - [ ] handle LVM partitions on multiple disks
// - [ ] find mounted disks/partitions on start (in case of interruptions)
//
// - [ ] manual mount/unmount (of disk) for advanced file restore
// - could it stay mounted during the backup process?
// - [ ] mountDisk (VHD)
// - [ ] unmountDisk (only for manual mount)
// - [ ] getMountedDisks
// - [ ] mountPartition (optional)
// - [ ] getMountedPartitions
// - [ ] unmountPartition
export default class BackupNgFileRestore {
constructor (app) {
this._app = app
this._mounts = { __proto__: null }
}
@defer
async fetchBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
paths
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer.onFailure(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer.onFailure(partition.unmount)
const zip = new ZipFile()
paths.forEach(file => {
zip.addFile(resolveSubpath(partition.path, file), normalize('./' + file))
})
zip.end()
return zip.outputStream.on('end', () =>
partition.unmount().then(disk.unmount)
)
}
@defer
async listBackupNgDiskPartitions ($defer, remoteId, diskId) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
return this._listPartitions(disk.path)
}
@defer
async listBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
path
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer(partition.unmount)
path = resolveSubpath(partition.path, path)
const entriesMap = {}
await Promise.all(
(await readdir(path)).map(async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
})
)
return entriesMap
}
async _findPartition (devicePath, partitionId) {
const partitions = await this._listPartitions(devicePath, false)
const partition = partitions.find(_ => _.id === partitionId)
if (partition === undefined) {
throw new Error(`partition ${partitionId} not found`)
}
return partition
}
async _listPartitions (devicePath, inspectLvmPv = true) {
const stdout = await execa.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
devicePath,
])
const promises = []
const partitions = []
splitLines(stdout).forEach(line => {
const partition = parsePartxLine(line)
let { type } = partition
if (type == null || (type = +type) in IGNORED_PARTITION_TYPES) {
return
}
if (inspectLvmPv && type === 0x8e) {
promises.push(listLvmLogicalVolumes(devicePath, partition, partitions))
return
}
partitions.push(partition)
})
await Promise.all(promises)
return partitions
}
@defer
async _mountDisk ($defer, remoteId, diskId) {
const handler = await this._app.getRemoteHandler(remoteId)
if (handler._getFilePath === undefined) {
throw new Error(`this remote is not supported`)
}
const diskPath = handler._getFilePath(diskId)
const mountDir = await tmpDir()
$defer.onFailure(rmdir, mountDir)
await execa('vhdimount', [diskPath, mountDir])
const unmount = async () => {
await execa('fusermount', ['-uz', mountDir])
return rmdir(mountDir)
}
$defer.onFailure(unmount)
let max = 0
let maxEntry
const entries = await readdir(mountDir)
entries.forEach(entry => {
const matches = RE_VHDI.exec(entry)
if (matches !== null) {
const value = +matches[1]
if (value > max) {
max = value
maxEntry = entry
}
}
})
if (max === 0) {
throw new Error('no disks found')
}
return {
path: `${mountDir}/${maxEntry}`,
unmount,
}
}
@defer
async _mountPartition ($defer, devicePath, partitionId) {
if (partitionId === undefined) {
return mountPartition(devicePath)
}
if (partitionId.includes('/')) {
const [pvId, vgName, lvName] = partitionId.split('/')
const lvmPartition =
pvId !== '' ? await this._findPartition(devicePath, pvId) : undefined
const pv = await mountLvmPhysicalVolume(devicePath, lvmPartition)
const unmountQueue = [pv.unmount]
const unmount = async () => {
let fn
while ((fn = unmountQueue.pop()) !== undefined) {
await fn()
}
}
$defer.onFailure(unmount)
await execa('vgchange', ['-ay', vgName])
unmountQueue.push(() => execa('vgchange', ['-an', vgName]))
const partition = await mountPartition(
(await lvs(['lv_name', 'lv_path'], vgName)).find(
_ => _.lv_name === lvName
).lv_path
)
unmountQueue.push(partition.unmount)
return { ...partition, unmount }
}
return mountPartition(
devicePath,
await this._findPartition(devicePath, partitionId)
)
}
}

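A hedged end-to-end sketch of the new class (remote and disk identifiers are assumptions; error handling omitted):

import { createWriteStream } from 'fs'

const restore = new BackupNgFileRestore(app)
const partitions = await restore.listBackupNgDiskPartitions(remoteId, diskId)
const entries = await restore.listBackupNgPartitionFiles(
  remoteId,
  diskId,
  partitions[0].id,
  '/'
)
const zip = await restore.fetchBackupNgPartitionFiles(
  remoteId,
  diskId,
  partitions[0].id,
  ['/etc/fstab']
)
zip.pipe(createWriteStream('restore.zip')) // mounts are released on 'end'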

@@ -209,28 +209,43 @@ export default class Jobs {
throw new Error(`job ${id} is already running`)
}
const executor = this._executors[job.type]
const { type } = job
const executor = this._executors[type]
if (executor === undefined) {
throw new Error(`cannot run job ${id}: no executor for type ${job.type}`)
throw new Error(`cannot run job ${id}: no executor for type ${type}`)
}
let data
if (type === 'backup') {
// $FlowFixMe only defined for BackupJob
const settings = job.settings['']
data = {
// $FlowFixMe only defined for BackupJob
mode: job.mode,
reportWhen: (settings && settings.reportWhen) || 'failure',
}
}
const logger = this._logger
const runJobId = logger.notice(`Starting execution of ${id}.`, {
data,
event: 'job.start',
userId: job.userId,
jobId: id,
// $FlowFixMe only defined for CallJob
key: job.key,
type,
})
runningJobs[id] = runJobId
let session
try {
const app = this._app
const session = app.createUserConnection()
session = app.createUserConnection()
session.set('user_id', job.userId)
const status = await executor({
await executor({
app,
cancelToken,
job,
@@ -244,8 +259,7 @@ export default class Jobs {
runJobId,
})
session.close()
app.emit('job:terminated', status)
app.emit('job:terminated', runJobId, job, schedule)
} catch (error) {
logger.error(`The execution of ${id} has failed.`, {
event: 'job.end',
@@ -255,6 +269,9 @@ export default class Jobs {
throw error
} finally {
delete runningJobs[id]
if (session !== undefined) {
session.close()
}
}
}

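For reference, the shape of the `job.start` record emitted above (values invented):

// {
//   event: 'job.start',
//   userId: '<id of the user who created the job>',
//   jobId: '<job id>',
//   key: undefined, // only defined for call jobs
//   type: 'backup',
//   data: { mode: 'delta', reportWhen: 'failure' }, // backup jobs only
// }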

@@ -32,11 +32,11 @@ export default class Logs {
const onData =
keep !== 0
? () => {
if (--keep === 0) {
stream.on('data', deleteEntry)
stream.removeListener('data', onData)
if (--keep === 0) {
stream.on('data', deleteEntry)
stream.removeListener('data', onData)
}
}
}
: deleteEntry
stream.on('data', onData)
@@ -51,4 +51,22 @@ export default class Logs {
.getStore('logs')
.then(store => new LevelDbLogger(store, namespace))
}
async getLogs (namespace) {
const logger = await this.getLogger(namespace)
return new Promise((resolve, reject) => {
const logs = {}
logger
.createReadStream()
.on('data', data => {
logs[data.key] = data.value
})
.on('end', () => {
resolve(logs)
})
.on('error', reject)
})
}
}


@@ -1,7 +1,7 @@
import { getHandler } from '@xen-orchestra/fs'
import { noSuchObject } from 'xo-common/api-errors'
import { forEach, mapToArray } from '../utils'
import { getHandler } from '../remote-handlers'
import { Remotes } from '../models/remote'
// ===================================================================


@@ -1,7 +1,7 @@
// @flow
import mergeVhd_ from '../../vhd-merge'
import { type Remote, getHandler } from '../../remote-handlers'
import { type Remote, getHandler } from '@xen-orchestra/fs'
import { mergeVhd as mergeVhd_ } from 'vhd-lib'
export function mergeVhd (
parentRemote: Remote,


@@ -140,7 +140,11 @@ export default class Xo extends EventEmitter {
}).then(
result => {
if (result != null) {
res.end(JSON.stringify(result))
if (typeof result.pipe === 'function') {
result.pipe(res)
} else {
res.end(JSON.stringify(result))
}
}
},
error => {


@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)


@@ -1,6 +1,6 @@
{
"name": "xo-vmdk-to-vhd",
"version": "0.0.12",
"version": "0.1.1",
"license": "AGPL-3.0",
"description": "JS lib streaming a vmdk file to a vhd",
"keywords": [
@@ -23,44 +23,33 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.18.0",
"@babel/runtime": "^7.0.0-beta.44",
"child-process-promise": "^2.0.3",
"deflate-js": "^0.2.3",
"fs-promise": "^2.0.0",
"pipette": "^0.9.3"
"pipette": "^0.9.3",
"promise-toolbox": "^0.9.5",
"tmp": "^0.0.33",
"vhd-lib": "^0.1.0"
},
"devDependencies": {
"babel-cli": "^6.18.0",
"babel-plugin-transform-runtime": "^6.15.0",
"babel-preset-env": "^1.0.0",
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-transform-runtime": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"mocha": "^4.0.1",
"must": "^0.13.2",
"rimraf": "^2.5.4"
"event-to-promise": "^0.8.0",
"execa": "^0.10.0",
"fs-extra": "^5.0.0",
"get-stream": "^3.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"dev-test": "mocha --watch --reporter=min \"dist/**/*.spec.js\"",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"test-disabled": "mocha \"dist/**/*.spec.js\""
},
"babel": {
"plugins": [
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
]
]
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}


@@ -1 +1,16 @@
export { convertFromVMDK as default } from './vhd-write'
import { createReadableSparseStream } from 'vhd-lib'
import { VMDKDirectParser, readVmdkGrainTable } from './vmdk-read'
async function convertFromVMDK (vmdkReadStream, table) {
const parser = new VMDKDirectParser(vmdkReadStream)
const header = await parser.readHeader()
return createReadableSparseStream(
header.capacitySectors * 512,
header.grainSizeSectors * 512,
table,
parser.blockIterator()
)
}
export { convertFromVMDK as default, readVmdkGrainTable }


@@ -1,340 +0,0 @@
'use strict'
import { open, write } from 'fs-promise'
import stream from 'stream'
import { VMDKDirectParser } from './vmdk-read'
const footerCookie = 'conectix'
const creatorApp = 'xo '
// it looks like everybody is using Wi2k
const osString = 'Wi2k'
const headerCookie = 'cxsparse'
const fixedHardDiskType = 2
const dynamicHardDiskType = 3
const sectorSize = 512
export function computeChecksum (buffer) {
let sum = 0
for (let i = 0; i < buffer.length; i++) {
sum += buffer[i]
}
// http://stackoverflow.com/a/1908655/72637 the >>> prevents the number from going negative
return ~sum >>> 0
}
class Block {
constructor (blockSize) {
const bitmapSize = blockSize / sectorSize / 8
const bufferSize =
Math.ceil((blockSize + bitmapSize) / sectorSize) * sectorSize
this.buffer = Buffer.alloc(bufferSize)
this.bitmapBuffer = this.buffer.slice(0, bitmapSize)
this.dataBuffer = this.buffer.slice(bitmapSize)
this.bitmapBuffer.fill(0xff)
}
writeData (buffer, offset = 0) {
buffer.copy(this.dataBuffer, offset)
}
async writeOnFile (file) {
await write(file, this.buffer, 0, this.buffer.length)
}
}
class SparseExtent {
constructor (dataSize, blockSize, startOffset) {
this.table = createEmptyTable(dataSize, blockSize)
this.blockSize = blockSize
this.startOffset = (startOffset + this.table.buffer.length) / sectorSize
}
get entryCount () {
return this.table.entryCount
}
_writeBlock (blockBuffer, tableIndex, offset) {
if (blockBuffer.length + offset > this.blockSize) {
throw new Error('invalid block geometry')
}
let entry = this.table.entries[tableIndex]
if (entry === undefined) {
entry = new Block(this.blockSize)
this.table.entries[tableIndex] = entry
}
entry.writeData(blockBuffer, offset)
}
writeBuffer (buffer, offset = 0) {
const startBlock = Math.floor(offset / this.blockSize)
const endBlock = Math.ceil((offset + buffer.length) / this.blockSize)
for (let i = startBlock; i < endBlock; i++) {
const blockDelta = offset - i * this.blockSize
let blockBuffer, blockOffset
if (blockDelta > 0) {
blockBuffer = buffer.slice(0, (i + 1) * this.blockSize - offset)
blockOffset = blockDelta
} else {
blockBuffer = buffer.slice(
-blockDelta,
(i + 1) * this.blockSize - offset
)
blockOffset = 0
}
this._writeBlock(blockBuffer, i, blockOffset)
}
}
async writeOnFile (file) {
let currentOffset = this.startOffset
for (let i = 0; i < this.table.entryCount; i++) {
const block = this.table.entries[i]
if (block !== undefined) {
this.table.buffer.writeUInt32BE(currentOffset, i * 4)
currentOffset += block.buffer.length / sectorSize
}
}
await write(file, this.table.buffer, 0, this.table.buffer.length)
for (let i = 0; i < this.table.entryCount; i++) {
const block = this.table.entries[i]
if (block !== undefined) {
await block.writeOnFile(file)
}
}
}
}
export class VHDFile {
constructor (virtualSize, timestamp) {
this.geomtry = computeGeometryForSize(virtualSize)
this.timestamp = timestamp
this.blockSize = 0x00200000
this.sparseFile = new SparseExtent(
this.geomtry.actualSize,
this.blockSize,
sectorSize * 3
)
}
writeBuffer (buffer, offset = 0) {
this.sparseFile.writeBuffer(buffer, offset)
}
async writeFile (fileName) {
const fileFooter = createFooter(
this.geomtry.actualSize,
this.timestamp,
this.geomtry,
dynamicHardDiskType,
512,
0
)
const diskHeader = createDynamicDiskHeader(
this.sparseFile.entryCount,
this.blockSize
)
const file = await open(fileName, 'w')
await write(file, fileFooter, 0, fileFooter.length)
await write(file, diskHeader, 0, diskHeader.length)
await this.sparseFile.writeOnFile(file)
await write(file, fileFooter, 0, fileFooter.length)
}
}
export function computeGeometryForSize (size) {
const totalSectors = Math.ceil(size / 512)
let sectorsPerTrack
let heads
let cylinderTimesHeads
if (totalSectors > 65535 * 16 * 255) {
throw Error('disk is too big')
}
// straight copypasta from the file spec appendix on CHS Calculation
if (totalSectors >= 65535 * 16 * 63) {
sectorsPerTrack = 255
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
} else {
sectorsPerTrack = 17
cylinderTimesHeads = totalSectors / sectorsPerTrack
heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
if (heads < 4) {
heads = 4
}
if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
sectorsPerTrack = 31
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
}
if (cylinderTimesHeads >= heads * 1024) {
sectorsPerTrack = 63
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrack
}
}
const cylinders = Math.floor(cylinderTimesHeads / heads)
const actualSize = cylinders * heads * sectorsPerTrack * sectorSize
return { cylinders, heads, sectorsPerTrack, actualSize }
}
export function createFooter (
size,
timestamp,
geometry,
diskType,
dataOffsetLow = 0xffffffff,
dataOffsetHigh = 0xffffffff
) {
const footer = Buffer.alloc(512)
Buffer.from(footerCookie, 'ascii').copy(footer)
footer.writeUInt32BE(2, 8)
footer.writeUInt32BE(0x00010000, 12)
footer.writeUInt32BE(dataOffsetHigh, 16)
footer.writeUInt32BE(dataOffsetLow, 20)
footer.writeUInt32BE(timestamp, 24)
Buffer.from(creatorApp, 'ascii').copy(footer, 28)
Buffer.from(osString, 'ascii').copy(footer, 36)
// do not use & 0xFFFFFFFF to extract lower bits, that would propagate a negative sign if the 2^31 bit is one
const sizeHigh = Math.floor(size / Math.pow(2, 32)) % Math.pow(2, 32)
const sizeLow = size % Math.pow(2, 32)
footer.writeUInt32BE(sizeHigh, 40)
footer.writeUInt32BE(sizeLow, 44)
footer.writeUInt32BE(sizeHigh, 48)
footer.writeUInt32BE(sizeLow, 52)
footer.writeUInt16BE(geometry['cylinders'], 56)
footer.writeUInt8(geometry['heads'], 58)
footer.writeUInt8(geometry['sectorsPerTrack'], 59)
footer.writeUInt32BE(diskType, 60)
const checksum = computeChecksum(footer)
footer.writeUInt32BE(checksum, 64)
return footer
}
export function createDynamicDiskHeader (tableEntries, blockSize) {
const header = Buffer.alloc(1024)
Buffer.from(headerCookie, 'ascii').copy(header)
// hard code no next data
header.writeUInt32BE(0xffffffff, 8)
header.writeUInt32BE(0xffffffff, 12)
// hard code table offset
header.writeUInt32BE(0, 16)
header.writeUInt32BE(sectorSize * 3, 20)
header.writeUInt32BE(0x00010000, 24)
header.writeUInt32BE(tableEntries, 28)
header.writeUInt32BE(blockSize, 32)
const checksum = computeChecksum(header)
header.writeUInt32BE(checksum, 36)
return header
}
export function createEmptyTable (dataSize, blockSize) {
const blockCount = Math.ceil(dataSize / blockSize)
const tableSizeSectors = Math.ceil(blockCount * 4 / sectorSize)
const buffer = Buffer.alloc(tableSizeSectors * sectorSize, 0xff)
return { entryCount: blockCount, buffer: buffer, entries: [] }
}
export class ReadableRawVHDStream extends stream.Readable {
constructor (size, vmdkParser) {
super()
this.size = size
const geometry = computeGeometryForSize(size)
this.footer = createFooter(
size,
Math.floor(Date.now() / 1000),
geometry,
fixedHardDiskType
)
this.position = 0
this.vmdkParser = vmdkParser
this.done = false
this.busy = false
this.currentFile = []
}
filePadding (paddingLength) {
if (paddingLength !== 0) {
const chunkSize = 1024 * 1024 // 1 MiB
const chunkCount = Math.floor(paddingLength / chunkSize)
for (let i = 0; i < chunkCount; i++) {
this.currentFile.push(() => {
const paddingBuffer = Buffer.alloc(chunkSize)
return paddingBuffer
})
}
this.currentFile.push(() => {
const paddingBuffer = Buffer.alloc(paddingLength % chunkSize)
return paddingBuffer
})
}
}
async pushNextBlock () {
const next = await this.vmdkParser.next()
if (next === null) {
const paddingLength = this.size - this.position
this.filePadding(paddingLength)
this.currentFile.push(() => this.footer)
this.currentFile.push(() => {
this.done = true
return null
})
} else {
const offset = next.lbaBytes
const buffer = next.grain
const paddingLength = offset - this.position
if (paddingLength < 0) {
process.nextTick(() =>
this.emit(
'error',
'This VMDK file does not have its blocks in the correct order'
)
)
}
this.filePadding(paddingLength)
this.currentFile.push(() => buffer)
this.position = offset + buffer.length
}
return this.pushFileUntilFull()
}
// returns true if the file is empty
pushFileUntilFull () {
while (true) {
if (this.currentFile.length === 0) {
break
}
const result = this.push(this.currentFile.shift()())
if (!result) {
break
}
}
return this.currentFile.length === 0
}
async pushNextUntilFull () {
while (!this.done && (await this.pushNextBlock())) {}
}
_read () {
if (this.busy || this.done) {
return
}
if (this.pushFileUntilFull()) {
this.busy = true
this.pushNextUntilFull()
.then(() => {
this.busy = false
})
.catch(error => {
process.nextTick(() => this.emit('error', error))
})
}
}
}
export async function convertFromVMDK (vmdkReadStream) {
const parser = new VMDKDirectParser(vmdkReadStream)
const header = await parser.readHeader()
return new ReadableRawVHDStream(header.capacitySectors * sectorSize, parser)
}


@@ -1,137 +0,0 @@
'use strict'
import expect from 'must'
import { createWriteStream } from 'fs'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { readFile } from 'fs-promise'
import {
computeChecksum,
computeGeometryForSize,
createDynamicDiskHeader,
createFooter,
ReadableRawVHDStream,
VHDFile,
} from './vhd-write'
describe('VHD writing', () => {
it('computeChecksum() is correct against some reference values', () => {
// those values were taken from a file generated by qemu
const testValue1 =
'636F6E6563746978000000020001000000000000000002001F34DB9F71656D75000500035769326B0000000000019800000000000001980000030411000000030000000033B3A5E17F94433498376740246E5660'
const expectedChecksum1 = 0xffffefb2
const testValue2 =
'6378737061727365FFFFFFFFFFFFFFFF0000000000000600000100000000000100200000'
const expectedChecksum2 = 0xfffff476
expect(computeChecksum(Buffer.from(testValue1, 'hex'))).to.equal(
expectedChecksum1
)
expect(computeChecksum(Buffer.from(testValue2, 'hex'))).to.equal(
expectedChecksum2
)
})
it('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {
cylinders: 3,
heads: 4,
sectorsPerTrack: 17,
})
})
it('createDynamicDiskHeader() does not crash', () => {
createDynamicDiskHeader(1, 0x00200000)
})
it('ReadableRawVHDStream does not crash', () => {
const data = [
{
lbaBytes: 100,
grain: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
lbaBytes: 700,
grain: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
const stream = new ReadableRawVHDStream(100000, mockParser)
const pipe = stream.pipe(createWriteStream('outputStream'))
return new Promise((resolve, reject) => {
pipe.on('finish', resolve)
pipe.on('error', reject)
})
})
it('ReadableRawVHDStream detects when blocks are out of order', () => {
const data = [
{
lbaBytes: 700,
grain: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
lbaBytes: 100,
grain: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
return expect(
new Promise((resolve, reject) => {
const stream = new ReadableRawVHDStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
})
).to.reject.to.equal(
'This VMDK file does not have its blocks in the correct order'
)
})
it('writing a known file with VHDFile is successful', async () => {
const fileName = 'output.vhd'
const rawFilename = 'output.raw'
const randomFileName = 'random.raw'
const geometry = computeGeometryForSize(1024 * 1024 * 8)
const dataSize = geometry.actualSize
await exec(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + randomFileName
)
const buffer = await readFile(randomFileName)
const f = new VHDFile(buffer.length, 523557791)
const splitPoint = Math.floor(Math.random() * buffer.length)
f.writeBuffer(buffer.slice(splitPoint), splitPoint)
f.writeBuffer(buffer.slice(0, splitPoint), 0)
f.writeBuffer(buffer.slice(splitPoint), splitPoint)
await f.writeFile(fileName)
await exec('qemu-img convert -fvpc -Oraw ' + fileName + ' ' + rawFilename)
const fileContent = await readFile(rawFilename)
expect(fileContent.length).to.equal(dataSize)
for (let i = 0; i < fileContent.length; i++) {
expect(fileContent[i]).to.equal(buffer[i])
}
})
})


@@ -0,0 +1,35 @@
/* eslint-env jest */
import { createReadStream, readFile } from 'fs-extra'
import { exec } from 'child-process-promise'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { VirtualBuffer } from './virtual-buffer'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('Virtual Buffer can read a file correctly', async () => {
const rawFileName = 'random-data'
await exec('base64 /dev/urandom | head -c 1048 > ' + rawFileName)
const buffer = new VirtualBuffer(createReadStream(rawFileName))
const part1 = await buffer.readChunk(10)
const part2 = await buffer.readChunk(1038)
const original = await readFile(rawFileName)
expect(buffer.isDepleted).toBeTruthy()
expect(Buffer.concat([part1, part2]).toString('ascii')).toEqual(
original.toString('ascii')
)
})


@@ -2,8 +2,6 @@
import { Slicer } from 'pipette'
const chunkSize = 1024 * 1024
export class VirtualBuffer {
constructor (readStream) {
this.slicer = new Slicer(readStream)
@@ -15,43 +13,23 @@ export class VirtualBuffer {
return !this.slicer.readable
}
// length = -1 means 'until the end'
async readChunk (length, label) {
const _this = this
if (this.promise !== null) {
throw new Error('promise already there !!!', this.promise)
}
if (length === -1) {
const chunks = []
let error = false
do {
const res = await new Promise((resolve, reject) => {
this.slicer.read(chunkSize, (error, length, data, offset) => {
if (error !== false && error !== true) {
reject(error)
} else {
resolve({ error, data })
}
})
})
error = res.error
chunks.push(res.data)
} while (error === false)
return Buffer.concat(chunks)
} else {
this.promise = label
return new Promise((resolve, reject) => {
this.slicer.read(length, (error, actualLength, data, offset) => {
if (error !== false && error !== true) {
_this.promise = null
reject(error)
} else {
_this.promise = null
_this.position += data.length
resolve(data)
}
})
this.promise = label
return new Promise((resolve, reject) => {
this.slicer.read(length, (error, actualLength, data, offset) => {
if (error !== false && error !== true) {
_this.promise = null
reject(error)
} else {
_this.promise = null
_this.position += data.length
resolve(data)
}
})
}
})
}
}
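For orientation, a minimal sketch of consuming the VirtualBuffer API after this change (file name illustrative; readChunk now always needs an explicit length, and reads must be sequential because of the promise guard):

import { createReadStream } from 'fs-extra'
import { VirtualBuffer } from './virtual-buffer'

async function readHeaderAndDescriptor (fileName) {
  const buffer = new VirtualBuffer(createReadStream(fileName))
  // the label is only used to report a concurrent read attempt
  const header = await buffer.readChunk(512, 'header')
  const descriptor = await buffer.readChunk(512, 'descriptor')
  return { header, descriptor, position: buffer.position }
}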

View File

@@ -1,21 +0,0 @@
import expect from 'must'
import { createReadStream, readFile } from 'fs-promise'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { VirtualBuffer } from './virtual-buffer'
describe('Virtual Buffer', function () {
it('can read a file correctly', async () => {
const rawFileName = 'random-data'
await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
const buffer = new VirtualBuffer(createReadStream(rawFileName))
const part1 = await buffer.readChunk(10)
const part2 = await buffer.readChunk(-1)
const original = await readFile(rawFileName)
expect(buffer.isDepleted).to.be.true()
expect(Buffer.concat([part1, part2]).toString('ascii')).to.equal(
original.toString('ascii')
)
})
})

View File

@@ -0,0 +1,48 @@
/* eslint-env jest */
import { createReadStream } from 'fs-extra'
import { exec } from 'child-process-promise'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { VMDKDirectParser } from './vmdk-read'
jest.setTimeout(10000)
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('VMDKDirectParser reads OK', async () => {
const rawFileName = 'random-data'
const fileName = 'random-data.vmdk'
await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
await exec(
'rm -f ' +
fileName +
'&& python /usr/share/pyshared/VMDKstream.py ' +
rawFileName +
' ' +
fileName
)
const parser = new VMDKDirectParser(createReadStream(fileName))
const header = await parser.readHeader()
const harvested = []
for await (const res of parser.blockIterator()) {
harvested.push(res)
}
expect(harvested.length).toEqual(2)
expect(harvested[0].offsetBytes).toEqual(0)
expect(harvested[0].data.length).toEqual(header['grainSizeSectors'] * 512)
expect(harvested[1].offsetBytes).toEqual(header['grainSizeSectors'] * 512)
})

View File

@@ -1,6 +1,7 @@
'use strict'
import zlib from 'zlib'
import { VirtualBuffer } from './virtual-buffer'
const sectorSize = 512
@@ -266,7 +267,7 @@ export class VMDKDirectParser {
return this.header
}
async next () {
async * blockIterator () {
while (!this.virtualBuffer.isDepleted) {
const position = this.virtualBuffer.position
const sector = await this.virtualBuffer.readChunk(
@@ -293,79 +294,71 @@ export class VMDKDirectParser {
'grain remainder ' + this.virtualBuffer.position
)
const grainBuffer = Buffer.concat([sector, remainderOfGrainBuffer])
return readGrain(
const grain = await readGrain(
0,
grainBuffer,
this.header.compressionMethod === compressionDeflate &&
this.header.flags.compressedGrains
)
}
}
return new Promise(resolve => resolve(null))
}
}
export async function readRawContent (readStream) {
const virtualBuffer = new VirtualBuffer(readStream)
const headerBuffer = await virtualBuffer.readChunk(512, 'header')
let header = parseHeader(headerBuffer)
// I think the multiplications are OK, because the descriptor is always at the beginning of the file
const descriptorLength = header.descriptorSizeSectors * sectorSize
const descriptorBuffer = await virtualBuffer.readChunk(
descriptorLength,
'descriptor'
)
const descriptor = parseDescriptor(descriptorBuffer)
// TODO: we concat them back for now so that the indices match, we'll have to introduce a bias later
const remainingBuffer = await virtualBuffer.readChunk(-1, 'remainder')
const buffer = Buffer.concat([
headerBuffer,
descriptorBuffer,
remainingBuffer,
])
if (header.grainDirectoryOffsetSectors === -1) {
header = parseHeader(buffer.slice(-1024, -1024 + sectorSize))
}
const rawOutputBuffer = Buffer.alloc(header.capacitySectors * sectorSize)
const l1Size = Math.floor(
(header.capacitySectors + header.l1EntrySectors - 1) / header.l1EntrySectors
)
const l2Size = header.numGTEsPerGT
const l1 = []
for (let i = 0; i < l1Size; i++) {
const l1Entry = buffer.readUInt32LE(
header.grainDirectoryOffsetSectors * sectorSize + 4 * i
)
if (l1Entry !== 0) {
l1.push(l1Entry)
const l2 = []
for (let j = 0; j < l2Size; j++) {
const l2Entry = buffer.readUInt32LE(l1Entry * sectorSize + 4 * j)
if (l2Entry !== 0 && l2Entry !== 1) {
const grain = await readGrain(
l2Entry,
buffer,
header['flags']['compressedGrains']
)
grain.grain.copy(rawOutputBuffer, grain.lba * sectorSize)
l2[j] = grain
}
yield { offsetBytes: grain.lbaBytes, data: grain.grain }
}
}
}
const vmdkType = descriptor['descriptor']['createType']
if (!vmdkType || vmdkType.toLowerCase() !== 'streamOptimized'.toLowerCase()) {
throw new Error(
'unsupported VMDK type "' +
vmdkType +
'", only streamOptimized is supported'
)
}
return {
descriptor: descriptor.descriptor,
extents: descriptor.extents,
rawFile: rawOutputBuffer,
}
}
export async function readVmdkGrainTable (fileAccessor) {
let headerBuffer = await fileAccessor(0, 512)
let grainAddrBuffer = headerBuffer.slice(56, 56 + 8)
if (
new Int8Array(grainAddrBuffer).reduce((acc, val) => acc && val === -1, true)
) {
headerBuffer = await fileAccessor(-1024, -1024 + 512)
grainAddrBuffer = headerBuffer.slice(56, 56 + 8)
}
const grainDirPosBytes =
new DataView(grainAddrBuffer).getUint32(0, true) * 512
const capacity =
new DataView(headerBuffer.slice(12, 12 + 8)).getUint32(0, true) * 512
const grainSize =
new DataView(headerBuffer.slice(20, 20 + 8)).getUint32(0, true) * 512
const grainCount = Math.ceil(capacity / grainSize)
const numGTEsPerGT = new DataView(headerBuffer.slice(44, 44 + 8)).getUint32(
0,
true
)
const grainTablePhysicalSize = numGTEsPerGT * 4
const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT)
const grainDirectoryPhysicalSize = grainDirectoryEntries * 4
const grainDirBuffer = await fileAccessor(
grainDirPosBytes,
grainDirPosBytes + grainDirectoryPhysicalSize
)
const grainDir = new Uint32Array(grainDirBuffer)
const cachedGrainTables = []
for (let i = 0; i < grainDirectoryEntries; i++) {
const grainTableAddr = grainDir[i] * 512
if (grainTableAddr !== 0) {
cachedGrainTables[i] = new Uint32Array(
await fileAccessor(
grainTableAddr,
grainTableAddr + grainTablePhysicalSize
)
)
}
}
const extractedGrainTable = []
for (let i = 0; i < grainCount; i++) {
const directoryEntry = Math.floor(i / numGTEsPerGT)
const grainTable = cachedGrainTables[directoryEntry]
if (grainTable !== undefined) {
const grainAddr = grainTable[i % numGTEsPerGT]
if (grainAddr !== 0) {
extractedGrainTable.push([i, grainAddr])
}
}
}
extractedGrainTable.sort(
([i1, grainAddress1], [i2, grainAddress2]) => grainAddress1 - grainAddress2
)
return extractedGrainTable.map(([index, grainAddress]) => index * grainSize)
}
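As a sanity check on the grain-directory arithmetic above, a worked example with assumed (not real-file) values:

// assume a 100 MiB disk with 64 KiB grains (128 sectors of 512 bytes)
const capacity = 100 * 1024 * 1024
const grainSize = 128 * 512
const grainCount = Math.ceil(capacity / grainSize) // 1600 grains
const numGTEsPerGT = 512 // entries per grain table
const grainTablePhysicalSize = numGTEsPerGT * 4 // 2048 bytes per table
const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT) // 4 tables
const grainDirectoryPhysicalSize = grainDirectoryEntries * 4 // 16 bytes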

View File

@@ -1,35 +0,0 @@
import expect from 'must'
import { createReadStream } from 'fs-promise'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { VMDKDirectParser } from './vmdk-read'
describe('VMDK reading', () => {
it('VMDKDirectParser reads OK', async () => {
const rawFileName = 'random-data'
const fileName = 'random-data.vmdk'
await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
await exec(
'rm -f ' +
fileName +
'&& VBoxManage convertfromraw --format VMDK --variant Stream ' +
rawFileName +
' ' +
fileName
)
const parser = new VMDKDirectParser(createReadStream(fileName))
const header = await parser.readHeader()
const harvested = []
while (true) {
const res = await parser.next()
if (res === null) {
break
}
harvested.push(res)
}
expect(harvested.length).to.equal(2)
expect(harvested[0].lba).to.equal(0)
expect(harvested[1].lba).to.equal(header['grainSizeSectors'])
}).timeout(10000)
})

View File

@@ -0,0 +1,92 @@
/* eslint-env jest */
import execa from 'execa'
import eventToPromise from 'event-to-promise'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createReadStream, createWriteStream, stat } from 'fs-extra'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import convertFromVMDK, { readVmdkGrainTable } from '.'
const initialDir = process.cwd()
jest.setTimeout(100000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
function createFileAccessor (file) {
return async (start, end) => {
if (start < 0 || end < 0) {
const fileLength = (await stat(file)).size
start = start < 0 ? fileLength + start : start
end = end < 0 ? fileLength + end : end
}
const result = await getStream.buffer(
createReadStream(file, { start, end: end - 1 })
)
// crazy stuff to get a browser-compatible ArrayBuffer from a node buffer
// https://stackoverflow.com/a/31394257/72637
return result.buffer.slice(
result.byteOffset,
result.byteOffset + result.byteLength
)
}
}
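The negative start/end values mirror slicing from the end of the file; readVmdkGrainTable relies on this to fetch the footer copy of the header. An illustrative call (inside an async context):

const accessor = createFileAccessor('random-data.vmdk')
// 512 bytes starting 1024 bytes before EOF: the footer copy of the header
const footerHeader = await accessor(-1024, -1024 + 512)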
test('VMDK to VHD can convert a random data file with VMDKDirectParser', async () => {
const inputRawFileName = 'random-data.raw'
const vmdkFileName = 'random-data.vmdk'
const vhdFileName = 'from-vmdk-VMDKDirectParser.vhd'
const reconvertedFromVhd = 'from-vhd.raw'
const reconvertedFromVmdk = 'from-vhd-by-vbox.raw'
const dataSize = 100 * 1024 * 1024 // chosen so the head/cylinder/sector-count geometry equations have integer solutions
try {
await execa.shell(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
)
await execa.shell(
'python /usr/share/pyshared/VMDKstream.py ' +
inputRawFileName +
' ' +
vmdkFileName
)
const result = await readVmdkGrainTable(createFileAccessor(vmdkFileName))
const pipe = (await convertFromVMDK(
createReadStream(vmdkFileName),
result
)).pipe(createWriteStream(vhdFileName))
await eventToPromise(pipe, 'finish')
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdFileName])
await execa('qemu-img', [
'convert',
'-fvmdk',
'-Oraw',
vmdkFileName,
reconvertedFromVmdk,
])
await execa('qemu-img', [
'convert',
'-fvpc',
'-Oraw',
vhdFileName,
reconvertedFromVhd,
])
await execa('qemu-img', ['compare', inputRawFileName, vhdFileName])
await execa('qemu-img', ['compare', vmdkFileName, vhdFileName])
} catch (error) {
console.error(error.stdout)
console.error(error.stderr)
console.error(error.message)
throw error
}
})

View File

@@ -1,115 +0,0 @@
'use strict'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { createReadStream, createWriteStream } from 'fs-promise'
import { readRawContent } from './vmdk-read'
import { VHDFile, convertFromVMDK, computeGeometryForSize } from './vhd-write'
describe('VMDK to VHD conversion', () => {
it('can convert a random data file with readRawContent()', async () => {
const inputRawFileName = 'random-data.raw'
const vmdkFileName = 'random-data.vmdk'
const vhdFileName = 'from-vmdk-readRawContent.vhd'
const reconvertedRawFilemane = 'from-vhd.raw'
const dataSize = 5222400
await exec(
'rm -f ' +
[
inputRawFileName,
vmdkFileName,
vhdFileName,
reconvertedRawFilemane,
].join(' ')
)
await exec(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
)
await exec(
'VBoxManage convertfromraw --format VMDK --variant Stream ' +
inputRawFileName +
' ' +
vmdkFileName
)
const rawContent = (await readRawContent(createReadStream(vmdkFileName)))
.rawFile
const f = new VHDFile(rawContent.length, 523557791)
await f.writeBuffer(rawContent)
await f.writeFile(vhdFileName)
await exec(
'qemu-img convert -fvpc -Oraw ' +
vhdFileName +
' ' +
reconvertedRawFilemane
)
return exec('qemu-img compare ' + vmdkFileName + ' ' + vhdFileName).catch(
error => {
console.error(error.stdout)
console.error(error.stderr)
console.error(vhdFileName, vmdkFileName, error.message)
throw error
}
)
})
it('can convert a random data file with VMDKDirectParser', async () => {
const inputRawFileName = 'random-data.raw'
const vmdkFileName = 'random-data.vmdk'
const vhdFileName = 'from-vmdk-VMDKDirectParser.vhd'
const reconvertedRawFilemane = 'from-vhd.raw'
const reconvertedByVBoxRawFilemane = 'from-vhd-by-vbox.raw'
const dataSize = computeGeometryForSize(8 * 1024 * 1024).actualSize
await exec(
'rm -f ' +
[
inputRawFileName,
vmdkFileName,
vhdFileName,
reconvertedRawFilemane,
reconvertedByVBoxRawFilemane,
].join(' ')
)
await exec(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
)
await exec(
'VBoxManage convertfromraw --format VMDK --variant Stream ' +
inputRawFileName +
' ' +
vmdkFileName
)
const pipe = (await convertFromVMDK(createReadStream(vmdkFileName))).pipe(
createWriteStream(vhdFileName)
)
await new Promise((resolve, reject) => {
pipe.on('finish', resolve)
pipe.on('error', reject)
})
await exec(
'qemu-img convert -fvmdk -Oraw ' +
vmdkFileName +
' ' +
reconvertedByVBoxRawFilemane
)
await exec(
'qemu-img convert -fvpc -Oraw ' +
vhdFileName +
' ' +
reconvertedRawFilemane
)
return exec(
'qemu-img compare ' +
reconvertedByVBoxRawFilemane +
' ' +
reconvertedRawFilemane
).catch(error => {
console.error(error.stdout)
console.error(error.stderr)
console.error(vhdFileName, vmdkFileName, error.message)
throw error
})
})
})

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "xo-web",
"version": "5.18.3",
"version": "5.19.5",
"license": "AGPL-3.0",
"description": "Web interface client for Xen-Orchestra",
"keywords": [
@@ -59,6 +59,7 @@
"classnames": "^2.2.3",
"complex-matcher": "^0.3.0",
"cookies-js": "^1.2.2",
"copy-to-clipboard": "^3.0.8",
"d3": "^5.0.0",
"debounce-input-decorator": "^0.1.0",
"enzyme": "^3.3.0",
@@ -125,7 +126,6 @@
"rimraf": "^2.6.2",
"semver": "^5.4.1",
"styled-components": "^3.1.5",
"tar-stream": "^1.5.5",
"uglify-es": "^3.3.4",
"uncontrollable-input": "^0.1.1",
"url-parse": "^1.2.0",
@@ -137,7 +137,8 @@
"xo-acl-resolver": "^0.2.3",
"xo-common": "^0.1.1",
"xo-lib": "^0.8.0",
"xo-remote-parser": "^0.3"
"xo-remote-parser": "^0.3",
"xo-vmdk-to-vhd": "^0.1.1"
},
"scripts": {
"build": "NODE_ENV=production gulp build",

View File

@@ -90,7 +90,7 @@ export default class ActionButton extends Component {
if (redirectOnSuccess) {
return this.context.router.push(
isFunction(redirectOnSuccess)
? redirectOnSuccess(result)
? redirectOnSuccess(result, handlerParam)
: redirectOnSuccess
)
}

View File

@@ -18,7 +18,13 @@ export const EllipsisContainer = ({ children }) => (
{React.Children.map(
children,
child =>
child == null || child.type === Ellipsis ? child : <span>{child}</span>
child == null ||
child.type === Ellipsis ||
(child.type != null && child.type.originalRender === Ellipsis) ? (
child
) : (
<span>{child}</span>
)
)}
</div>
)

View File

@@ -1,10 +1,9 @@
import isFunction from 'lodash/isFunction'
import isString from 'lodash/isString'
import moment from 'moment'
import PropTypes from 'prop-types'
import React, { Component } from 'react'
import { connect } from 'react-redux'
import { FormattedMessage, IntlProvider as IntlProvider_ } from 'react-intl'
import { every, isFunction, isString } from 'lodash'
import locales from './locales'
import messages from './messages'
@@ -102,8 +101,16 @@ export class FormattedDuration extends Component {
)
render () {
const parsedDuration = this._parseDuration()
return (
<Tooltip content={getMessage('durationFormat', this._parseDuration())}>
<Tooltip
content={getMessage(
every(parsedDuration, n => n === 0)
? 'secondsFormat'
: 'durationFormat',
parsedDuration
)}
>
<span>{this._humanizeDuration()}</span>
</Tooltip>
)
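A plausible reading of this change, consistent with the durationFormat and secondsFormat ICU messages added in messages.js further down: durationFormat drops every zero component, so an all-zero duration would render as an empty string; secondsFormat is substituted to display '0 seconds' instead. A minimal sketch of the selection logic:

import { every } from 'lodash'

const parsedDuration = { days: 0, hours: 0, minutes: 0, seconds: 0 }
const messageId = every(parsedDuration, n => n === 0)
  ? 'secondsFormat' // '{seconds, plural, ...}' → '0 seconds'
  : 'durationFormat' // every '=0 {}' branch would render nothing here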

View File

@@ -3857,7 +3857,8 @@ export default {
xosanUsedSpace: 'Espace utilisé',
// Original text: "XOSAN pack needs to be installed on each host of the pool."
xosanNeedPack: 'La pack XOSAN doit être installé sur tous les hôtes du pool.',
xosanNeedPack:
'Le pack XOSAN doit être installé et à jour sur tous les hôtes du pool.',
// Original text: "Install it now!"
xosanInstallIt: 'Installer maintenant !',

View File

@@ -41,6 +41,7 @@ const messages = {
// ----- Copiable component -----
copyToClipboard: 'Copy to clipboard',
copyUuid: 'Copy {uuid}',
// ----- Pills -----
pillMaster: 'Master',
@@ -162,6 +163,8 @@ const messages = {
homeSortByUsage: 'Usage',
homeSortByvCPUs: 'vCPUs',
homeSortVmsBySnapshots: 'Snapshots',
homeSortByContainer: 'Container',
homeSortByPool: 'Pool',
homeDisplayedItems: '{displayed, number}x {icon} (on {total, number})',
homeSelectedItems: '{selected, number}x {icon} selected (on {total, number})',
homeMore: 'More',
@@ -286,6 +289,23 @@ const messages = {
jobFinished: 'Finished',
jobInterrupted: 'Interrupted',
jobStarted: 'Started',
jobFailed: 'Failed',
jobSkipped: 'Skipped',
jobSuccess: 'Successful',
allTasks: 'All',
taskStart: 'Start',
taskEnd: 'End',
taskDuration: 'Duration',
taskSuccess: 'Successful',
taskFailed: 'Failed',
taskSkipped: 'Skipped',
taskStarted: 'Started',
taskInterrupted: 'Interrupted',
taskTransferredDataSize: 'Transfer size',
taskTransferredDataSpeed: 'Transfer speed',
taskMergedDataSize: 'Merge size',
taskMergedDataSpeed: 'Merge speed',
taskError: 'Error',
saveBackupJob: 'Save',
deleteBackupSchedule: 'Remove backup job',
deleteBackupScheduleQuestion:
@@ -324,6 +344,11 @@ const messages = {
runBackupNgJobConfirm: 'Are you sure you want to run {name} ({id})?',
// ------ New backup -----
newBackupAdvancedSettings: 'Advanced settings',
reportWhenAlways: 'Always',
reportWhenFailure: 'Failure',
reportWhenNever: 'Never',
reportWhen: 'Report when',
newBackupSelection: 'Select your backup type:',
smartBackupModeSelection: 'Select backup mode:',
normalBackup: 'Normal backup',
@@ -538,10 +563,11 @@ const messages = {
// ----- SR advanced tab -----
srUnhealthyVdiDepth: 'Depth',
srUnhealthyVdiNameLabel: 'Name',
srUnhealthyVdiSize: 'Size',
srUnhealthyVdiDepth: 'Depth',
srUnhealthyVdiTitle: 'VDI to coalesce ({total, number})',
srUnhealthyVdiUuid: 'UUID',
// ----- SR stats tab -----
@@ -764,6 +790,7 @@ const messages = {
started: 'Started {ago}',
paraVirtualizedMode: 'Paravirtualization (PV)',
hardwareVirtualizedMode: 'Hardware virtualization (HVM)',
windowsUpdateTools: 'Windows Update tools',
// ----- VM stat tab -----
statsCpu: 'CPU usage',
@@ -814,7 +841,6 @@ const messages = {
vdiMigrateAll: 'Migrate all VDIs',
vdiMigrateNoSr: 'No SR',
vdiMigrateNoSrMessage: 'A target SR is required to migrate a VDI',
vdiDelete: 'Delete VDI',
vdiForget: 'Forget',
vdiRemove: 'Remove VDI',
noControlDomainVdis: 'No VDIs attached to Control Domain',
@@ -825,7 +851,6 @@ const messages = {
vbdNoVbd: 'No disks',
vbdConnect: 'Connect VBD',
vbdDisconnect: 'Disconnect VBD',
vbdsDisconnect: 'Disconnect VBDs',
vbdBootable: 'Bootable',
vbdReadonly: 'Readonly',
vbdAction: 'Action',
@@ -932,6 +957,7 @@ const messages = {
vmCoresPerSocketIncorrectValue: 'Incorrect cores per socket value',
vmCoresPerSocketIncorrectValueSolution:
'Please change the selected value to fix it.',
vmHaDisabled: 'disabled',
vmMemoryLimitsLabel: 'Memory limits (min/max)',
vmMaxVcpus: 'vCPUs max:',
vmMaxRam: 'Memory max:',
@@ -1036,6 +1062,8 @@ const messages = {
vmNameLabel: 'Name',
vmNameDescription: 'Description',
vmContainer: 'Resident on',
vmSnapshotsRelatedToNonExistentBackups:
'VM snapshots related to non-existent backups',
alarmMessage: 'Alarms',
noAlarms: 'No alarms',
alarmDate: 'Date',
@@ -1531,6 +1559,7 @@ const messages = {
update: 'Update',
refresh: 'Refresh',
upgrade: 'Upgrade',
downgrade: 'Downgrade',
noUpdaterCommunity: 'No updater available for Community Edition',
considerSubscribe:
'Please consider subscribing and trying it with all the features for free during 15 days on {link}.',
@@ -1628,6 +1657,7 @@ const messages = {
logParams: 'Params',
logMessage: 'Message',
logError: 'Error',
logTitle: 'Logs',
logDisplayDetails: 'Display details',
logTime: 'Date',
logNoStackTrace: 'No stack trace',
@@ -1738,7 +1768,8 @@ const messages = {
xosanUsedSpace: 'Used space',
xosanLicense: 'License',
xosanMultipleLicenses: 'This XOSAN has more than 1 license!',
xosanNeedPack: 'XOSAN pack needs to be installed on each host of the pool.',
xosanNeedPack:
'XOSAN pack needs to be installed and up to date on each host of the pool.',
xosanInstallIt: 'Install it now!',
xosanNeedRestart:
'Some hosts need their toolstack to be restarted before you can create an XOSAN',
@@ -1766,6 +1797,14 @@ const messages = {
xosanPbdsDetached: 'Some SRs are detached from the XOSAN',
xosanBadStatus: 'Something is wrong with: {badStatuses}',
xosanRunning: 'Running',
xosanUpdatePacks: 'Update packs',
xosanPackUpdateChecking: 'Checking for updates',
xosanPackUpdateError:
'Error while checking XOSAN packs. Please make sure that the Cloud plugin is installed and loaded and that the updater is reachable.',
xosanPackUpdateUnavailable: 'XOSAN resources are unavailable',
xosanPackUpdateUnregistered: 'Not registered for XOSAN resources',
xosanPackUpdateUpToDate: "✓ This pool's XOSAN packs are up to date!",
xosanPackUpdateVersion: 'Update pool with latest pack v{version}',
xosanDelete: 'Delete XOSAN',
xosanFixIssue: 'Fix',
xosanCreatingOn: 'Creating XOSAN on {pool}',
@@ -1782,12 +1821,8 @@ const messages = {
xosanRegister: 'Register your appliance first',
xosanLoading: 'Loading…',
xosanNotAvailable: 'XOSAN is not available at the moment',
xosanInstallPackOnHosts: 'Install XOSAN pack on these hosts:',
xosanInstallPack: 'Install {pack} v{version}?',
xosanNoPackFound:
'No compatible XOSAN pack found for your XenServer versions.',
xosanPackRequirements:
'At least one of these version requirements must be satisfied by all the hosts in this pool:',
// SR tab XOSAN
xosanVmsNotRunning: 'Some XOSAN Virtual Machines are not running',
xosanVmsNotFound: 'Some XOSAN Virtual Machines could not be found',
@@ -1870,6 +1905,7 @@ const messages = {
xosanLoadXoaPlugin: 'Load XOA plugin first',
// ----- Utils -----
secondsFormat: '{seconds, plural, one {# second} other {# seconds}}',
durationFormat:
'{days, plural, =0 {} one {# day } other {# days }}{hours, plural, =0 {} one {# hour } other {# hours }}{minutes, plural, =0 {} one {# minute } other {# minutes }}{seconds, plural, =0 {} one {# second} other {# seconds}}',
}

View File

@@ -44,12 +44,7 @@ export default class GenericInput extends Component {
}
render () {
const {
schema,
value = schema.default,
uiSchema = EMPTY_OBJECT,
...opts
} = this.props
const { schema, value, uiSchema = EMPTY_OBJECT, ...opts } = this.props
const props = {
...opts,

View File

@@ -5,6 +5,7 @@ import { startsWith } from 'lodash'
import Icon from './icon'
import propTypes from './prop-types-decorator'
import { createGetObject } from './selectors'
import { FormattedDate } from 'react-intl'
import { isSrWritable } from './xo'
import { connectStore, formatSize } from './utils'
@@ -203,10 +204,29 @@ const xoItemToRender = {
: group.name_label}
</span>
),
backup: backup => (
<span>
<span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
{backup.mode}
</span>{' '}
<span className='tag tag-warning'>{backup.remote.name}</span>{' '}
<FormattedDate
value={new Date(backup.timestamp)}
month='long'
day='numeric'
year='numeric'
hour='2-digit'
minute='2-digit'
second='2-digit'
/>
</span>
),
}
const renderXoItem = (item, { className } = {}) => {
const { id, type, label } = item
const renderXoItem = (item, { className, type: xoType } = {}) => {
const { id, label } = item
const type = xoType || item.type
if (item.removed) {
return (
@@ -245,6 +265,9 @@ const renderXoItem = (item, { className } = {}) => {
export { renderXoItem as default }
export const getRenderXoItemOfType = type => (item, options = {}) =>
renderXoItem(item, { ...options, type })
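The new type override lets callers render items that carry no type field of their own, such as the backup entries handled above; a sketch (object shape assumed from the renderer):

const renderBackup = getRenderXoItemOfType('backup')
renderBackup({
  mode: 'delta',
  remote: { name: 'NFS store' },
  timestamp: 1526400000000,
})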
const GenericXoItem = connectStore(() => {
const getObject = createGetObject()

View File

@@ -495,7 +495,8 @@ export const createDoesHostNeedRestart = hostSelector => {
)
)
.find([
({ guidance }) =>
({ guidance, upgrade }) =>
upgrade ||
find(
guidance,
action => action === 'restartHost' || action === 'restartXapi'

View File

@@ -197,6 +197,7 @@ const actionsShape = propTypes.arrayOf(
icon: propTypes.string.isRequired,
label: propTypes.node.isRequired,
level: propTypes.oneOf(['primary', 'warning', 'danger']),
redirectOnSuccess: propTypes.oneOfType([propTypes.func, propTypes.string]),
})
)
@@ -208,6 +209,13 @@ class IndividualAction extends Component {
(disabled, item, userData) =>
isFunction(disabled) ? disabled(item, userData) : disabled
)
_getLabel = createSelector(
() => this.props.label,
() => this.props.item,
() => this.props.userData,
(label, item, userData) =>
isFunction(label) ? label(item, userData) : label
)
_executeAction = () => {
const p = this.props
@@ -215,15 +223,18 @@ class IndividualAction extends Component {
}
render () {
const { icon, label, level } = this.props
const { icon, item, level, redirectOnSuccess, userData } = this.props
return (
<ActionRowButton
btnStyle={level}
data-item={item}
data-userData={userData}
disabled={this._getIsDisabled()}
handler={this._executeAction}
icon={icon}
tooltip={label}
redirectOnSuccess={redirectOnSuccess}
tooltip={this._getLabel()}
/>
)
}
@@ -237,6 +248,13 @@ class GroupedAction extends Component {
(disabled, selectedItems, userData) =>
isFunction(disabled) ? disabled(selectedItems, userData) : disabled
)
_getLabel = createSelector(
() => this.props.label,
() => this.props.selectedItems,
() => this.props.userData,
(label, selectedItems, userData) =>
isFunction(label) ? label(selectedItems, userData) : label
)
_executeAction = () => {
const p = this.props
@@ -244,7 +262,7 @@ class GroupedAction extends Component {
}
render () {
const { icon, label, level } = this.props
const { icon, level } = this.props
return (
<ActionRowButton
@@ -252,7 +270,7 @@ class GroupedAction extends Component {
disabled={this._getIsDisabled()}
handler={this._executeAction}
icon={icon}
tooltip={label}
tooltip={this._getLabel()}
/>
)
}
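With these selectors, label (like disabled before it) may be either a plain node or a function of the row data; a hypothetical actions entry using both new capabilities (the handler name is assumed, not from the source):

const actions = [
  {
    icon: 'delete',
    label: (item, userData) => `Delete ${item.name_label}`,
    level: 'danger',
    handler: deleteItem, // assumed to exist in the consuming component
    redirectOnSuccess: '/home',
  },
]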

View File

@@ -20,6 +20,7 @@ import {
mapValues,
replace,
sample,
some,
startsWith,
} from 'lodash'
@@ -28,6 +29,7 @@ import * as actions from './store/actions'
import invoke from './invoke'
import store from './store'
import { getObject } from './selectors'
import { satisfies as versionSatisfies } from 'semver'
export const EMPTY_ARRAY = Object.freeze([])
export const EMPTY_OBJECT = Object.freeze({})
@@ -523,6 +525,40 @@ export const ShortDate = ({ timestamp }) => (
<FormattedDate value={timestamp} month='short' day='numeric' year='numeric' />
)
export const findLatestPack = (packs, hostsVersions) => {
const checkVersion = version =>
!version ||
every(hostsVersions, hostVersion => versionSatisfies(hostVersion, version))
let latestPack = { version: '0' }
forEach(packs, pack => {
if (
pack.type === 'iso' &&
compareVersions(pack.version, '>', latestPack.version) &&
checkVersion(pack.requirements && pack.requirements.xenserver)
) {
latestPack = pack
}
})
if (latestPack.version === '0') {
// No compatible pack was found
return
}
return latestPack
}
export const isLatestXosanPackInstalled = (latestXosanPack, hosts) =>
latestXosanPack !== undefined &&
every(hosts, host =>
some(
host.supplementalPacks,
({ name, version }) =>
name === 'XOSAN' && version === latestXosanPack.version
)
)
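A quick sketch of how these two helpers compose (pack and host shapes assumed from the code above):

const packs = [
  { type: 'iso', version: '1.2.0', requirements: { xenserver: '>=7.0.0' } },
  { type: 'iso', version: '1.1.0' },
]
const hosts = [
  { version: '7.4.0', supplementalPacks: [{ name: 'XOSAN', version: '1.1.0' }] },
]
const latest = findLatestPack(packs, hosts.map(h => h.version)) // → the 1.2.0 pack
isLatestXosanPackInstalled(latest, hosts) // → false: a host still runs 1.1.0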
// ===================================================================
export const getMemoryUsedMetric = ({ memory, memoryFree = memory }) =>

View File

@@ -1,10 +1,22 @@
import ChartistGraph from 'react-chartist'
import ChartistLegend from 'chartist-plugin-legend'
import ChartistTooltip from 'chartist-plugin-tooltip'
import humanFormat from 'human-format'
import React from 'react'
import { injectIntl } from 'react-intl'
import { messages } from 'intl'
import { find, flatten, floor, get, map, max, size, sum, values } from 'lodash'
import {
find,
flatten,
floor,
get,
map,
max,
round,
size,
sum,
values,
} from 'lodash'
import propTypes from '../prop-types-decorator'
import { computeArraysSum } from '../xo-stats'
@@ -87,9 +99,9 @@ const makeLabelInterpolationFnc = (intl, nValues, endTimestamp, interval) => {
return (value, index) =>
index % labelSpace === 0
? intl.formatTime(
(endTimestamp - (nValues - index - 1) * interval) * 1000,
format
)
(endTimestamp - (nValues - index - 1) * interval) * 1000,
format
)
: null
}
@@ -441,19 +453,19 @@ export const PoolPifLineChart = injectIntl(
const series = addSumSeries
? map(ios, io => ({
name: `${intl.formatMessage(messages.poolAllHosts)} (${io})`,
data: computeArraysSum(
map(data, ({ stats }) => computeArraysSum(stats.pifs[io]))
),
}))
name: `${intl.formatMessage(messages.poolAllHosts)} (${io})`,
data: computeArraysSum(
map(data, ({ stats }) => computeArraysSum(stats.pifs[io]))
),
}))
: flatten(
map(data, ({ stats, host }) =>
map(ios, io => ({
name: `${host} (${io})`,
data: computeArraysSum(stats.pifs[io]),
}))
map(data, ({ stats, host }) =>
map(ios, io => ({
name: `${host} (${io})`,
data: computeArraysSum(stats.pifs[io]),
}))
)
)
)
return (
<ChartistGraph
@@ -604,7 +616,11 @@ export const IopsLineChart = injectIntl(
nValues: length,
endTimestamp,
interval,
valueTransform: value => `${value.toPrecision(3)} /s`,
valueTransform: value =>
humanFormat(value, {
decimals: 3,
unit: 'IOPS',
}),
}),
...options,
}}
@@ -721,7 +737,7 @@ export const IowaitChart = injectIntl(
nValues: length,
endTimestamp,
interval,
valueTransform: value => `${value.toPrecision(2)}%`,
valueTransform: value => `${round(value, 2)}%`,
}),
...options,
}}
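Two formatter fixes worth noting: Number#toPrecision counts significant digits and switches to exponential notation for large values, whereas lodash's round keeps fixed decimal places; humanFormat presumably scales IOPS values into readable units. Illustratively:

const old = (123.456).toPrecision(2) // '1.2e+2' — what the old iowait label produced
const fixed = round(123.456, 2) // 123.46 — what the new label produces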

Some files were not shown because too many files have changed in this diff.