Compare commits
25 Commits
xo-server-...fs-v0.0.0
| Author | SHA1 | Date |
|---|---|---|
|  | a21bf4ebe5 |  |
|  | 3d0420dbd9 |  |
|  | 04c74dd30f |  |
|  | 2f256291ae |  |
|  | bcb66a4145 |  |
|  | 2d9368062e |  |
|  | b110bacf61 |  |
|  | 78afdc0af5 |  |
|  | ad6cd7985a |  |
|  | a61661776d |  |
|  | 1a9ebddcab |  |
|  | 7ab907a854 |  |
|  | 68a34f7cdb |  |
|  | da4ff3082d |  |
|  | 9c05a59b5f |  |
|  | 6780146505 |  |
|  | 2758833fc6 |  |
|  | 2786d7ec46 |  |
|  | 945a2006c9 |  |
|  | b9e574e32f |  |
|  | 34f1ef1680 |  |
|  | 4ac4310bc1 |  |
|  | a10997ca66 |  |
|  | 0e52a4c7dc |  |
|  | a4b3e22c2b |  |
2  .gitignore (vendored)

@@ -8,6 +8,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/plot.dat
/packages/xo-server/.xo-server.*
@@ -12,6 +12,7 @@ addons:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter

before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash
3  @xen-orchestra/fs/.babelrc.js  Normal file

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
54  @xen-orchestra/fs/package.json  Normal file

@@ -0,0 +1,54 @@
{
  "name": "@xen-orchestra/fs",
  "version": "0.0.0",
  "license": "AGPL-3.0",
  "description": "The File System for Xen Orchestra backups.",
  "keywords": [],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "@babel/runtime": "^7.0.0-beta.44",
    "@marsaud/smb2-promise": "^0.2.1",
    "execa": "^0.10.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.9.5",
    "through2": "^2.0.3",
    "tmp": "^0.0.33",
    "xo-remote-parser": "^0.3"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}
@@ -1,11 +1,11 @@
 // @flow

+import getStream from 'get-stream'
+import { randomBytes } from 'crypto'
+import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
 import { type Readable, type Writable } from 'stream'
-import { fromEvent, ignoreErrors } from 'promise-toolbox'
 import { parse } from 'xo-remote-parser'

-import { getPseudoRandomBytes, streamToBuffer } from '../utils'
-
 import { createChecksumStream, validChecksumOfReadStream } from './checksum'

 type Data = Buffer | Readable | string

@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
   async test (): Promise<Object> {
     const testFileName = `${Date.now()}.test`
-    const data = getPseudoRandomBytes(1024 * 1024)
+    const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
     let step = 'write'
     try {
       await this.outputFile(testFileName, data)

@@ -97,7 +97,7 @@ export default class RemoteHandlerAbstract {
   }

   _readFile (file: string, options?: Object): Promise<Buffer> {
-    return this.createReadStream(file, options).then(streamToBuffer)
+    return this.createReadStream(file, options).then(getStream.buffer)
   }

   async rename (
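Note on the hunks above: two in-house helpers are replaced by library equivalents — `streamToBuffer` by `getStream.buffer` from get-stream, and the pseudo-random test payload by real random data from Node's `crypto.randomBytes`, promisified with promise-toolbox's `fromCallback`. A minimal standalone sketch of the two patterns (the file path is only an example):

```js
import getStream from 'get-stream'
import { randomBytes } from 'crypto'
import { fromCallback } from 'promise-toolbox'
import { createReadStream } from 'fs'

async function demo () {
  // promisify the callback-style crypto.randomBytes
  const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
  console.log(data.length) // 1048576

  // drain a whole readable stream into a single Buffer (get-stream v3 API)
  const buf = await getStream.buffer(createReadStream('/etc/hosts'))
  console.log(buf.length)
}

demo().catch(console.error)
```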
26  @xen-orchestra/fs/src/fs.integ.spec.js  Normal file

@@ -0,0 +1,26 @@
/* eslint-env jest */

import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'

import { getHandler } from '.'

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test("fs test doesn't crash", async () => {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const result = await handler.test()
  expect(result.success).toBeTruthy()
})
@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'

export type { default as RemoteHandler } from './abstract'
export type Remote = { url: string }

const HANDLERS = {
@@ -1,7 +1,9 @@
 import Smb2 from '@marsaud/smb2-promise'
+import { lastly as pFinally } from 'promise-toolbox'

 import RemoteHandlerAbstract from './abstract'
-import { noop, pFinally } from '../utils'
+
+const noop = () => {}

 // Normalize the error code for file not found.
 const normalizeError = error => {
102  CHANGELOG.md

@@ -1,9 +1,104 @@
# ChangeLog

## **5.20.0** (planned 2018-05-31)

### Enhancements

- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)

### Bugs

## **5.19.0** (2018-05-01)

### Enhancements

- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)

### Bugs

- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)

## **5.18.0** (2018-03-31)

### Enhancements

- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)

### Bugs

- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
- [Basic backup] Continous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)

## **5.17.0** (2018-03-02)

### Enhancements

- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)

@@ -22,6 +117,9 @@
- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)

### Bugs

@@ -41,6 +139,7 @@
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)

@@ -50,6 +149,8 @@
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)

### Bugs

@@ -79,6 +180,7 @@
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)

### Bugs
3  flow-typed/promise-toolbox.js (vendored)

@@ -5,6 +5,9 @@ declare module 'promise-toolbox' {
   reject: T => void,
   resolve: T => void,
 |}
+declare export function fromCallback<T>(
+  (cb: (error: any, value: T) => void) => void
+): Promise<T>
 declare export function fromEvent(emitter: mixed, string): Promise<mixed>
 declare export function ignoreErrors(): Promise<void>
 declare export function timeout<T>(delay: number): Promise<T>
@@ -34,16 +34,18 @@
   "testEnvironment": "node",
   "testPathIgnorePatterns": [
     "/dist/",
     "/xo-vmdk-to-vhd/",
     "/xo-web/"
   ],
   "testRegex": "\\.spec\\.js$",
   "transform": {
     "/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
+    "/@xen-orchestra/fs/.+\\.jsx?$": "babel-7-jest",
     "/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
     "/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
+    "/packages/vhd-lib/.+\\.jsx?$": "babel-7-jest",
     "/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
     "/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
     "/packages/xo-vmdk-to-vhd/.+\\.jsx?$": "babel-7-jest",
     "\\.jsx?$": "babel-jest"
   }
 },
@@ -1,6 +1,6 @@
 {
   "name": "vhd-cli",
-  "version": "0.0.0",
+  "version": "0.0.1",
   "license": "ISC",
   "description": "",
   "keywords": [],

@@ -26,10 +26,11 @@
     "node": ">=4"
   },
   "dependencies": {
-    "struct-fu": "^1.2.0",
-    "@nraynaud/xo-fs": "^0.0.5",
+    "@xen-orchestra/fs": "^0.0.0",
     "babel-runtime": "^6.22.0",
-    "exec-promise": "^0.7.0"
+    "exec-promise": "^0.7.0",
+    "struct-fu": "^1.2.0",
+    "vhd-lib": "^0.0.0"
   },
   "devDependencies": {
     "babel-cli": "^6.24.1",

@@ -38,14 +39,18 @@
     "babel-preset-env": "^1.5.2",
     "babel-preset-stage-3": "^6.24.1",
     "cross-env": "^5.1.3",
-    "rimraf": "^2.6.1"
+    "execa": "^0.10.0",
+    "index-modules": "^0.3.0",
+    "promise-toolbox": "^0.9.5",
+    "rimraf": "^2.6.1",
+    "tmp": "^0.0.33"
   },
   "scripts": {
     "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
-    "prebuild": "rimraf dist/",
+    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepare": "yarn run build"
   },
   "babel": {
     "plugins": [
15  packages/vhd-cli/src/commands/check.js  Normal file

@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const handler = getHandler({ url: 'file:///' })
  for (const vhd of args) {
    try {
      await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
      console.log('ok:', vhd)
    } catch (error) {
      console.error('nok:', vhd, error)
    }
  }
}
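A hypothetical direct use of this command module (the file names are illustrative; the integration test further down drives the `info` command the same way):

```js
import check from './commands/check'

// prints 'ok: <path>' for each structurally valid VHD,
// 'nok: <path> <error>' otherwise
check(['disk1.vhd', 'disk2.vhd']).catch(console.error)
```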
12  packages/vhd-cli/src/commands/info.js  Normal file

@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))

  await vhd.readHeaderAndFooter()

  console.log(vhd.header)
  console.log(vhd.footer)
}
21  packages/vhd-cli/src/commands/synthetize.js  Normal file

@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'

export default async function main (args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }

  const handler = getHandler({ url: 'file:///' })
  return new Promise((resolve, reject) => {
    createSyntheticStream(handler, path.resolve(args[0]))
      .on('error', reject)
      .pipe(
        createWriteStream(args[1])
          .on('error', reject)
          .on('finish', resolve)
      )
  })
}
@@ -1,19 +1,44 @@
 #!/usr/bin/env node

 import execPromise from 'exec-promise'
-import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
-import { resolve } from 'path'

-import Vhd from './vhd'
+import commands from './commands'

-execPromise(async args => {
-  const vhd = new Vhd(
-    new RemoteHandlerLocal({ url: 'file:///' }),
-    resolve(args[0])
-  )
+function runCommand (commands, [command, ...args]) {
+  if (command === undefined || command === '-h' || command === '--help') {
+    command = 'help'
+  }
+
+  const fn = commands[command]
+
+  if (fn === undefined) {
+    if (command === 'help') {
+      return `Usage:
+
+${Object.keys(commands)
+    .filter(command => command !== 'help')
+    .map(command => `  ${this.command} ${command}`)
+    .join('\n\n')}`
+    }
+
+    throw `invalid command ${command}` // eslint-disable-line no-throw-literal
+  }
+
+  return fn.call(
+    {
+      __proto__: this,
+      command: `${this.command} ${command}`,
+    },
+    args
+  )
+}

-  await vhd.readHeaderAndFooter()
-
-  console.log(vhd._header)
-  console.log(vhd._footer)
-})
+execPromise(
+  runCommand.bind(
+    {
+      command: 'vhd-cli',
+      runCommand,
+    },
+    commands
+  )
+)
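The rewritten entry point turns vhd-cli into a sub-command dispatcher: `runCommand` looks the first CLI argument up in the `commands` map generated by index-modules, falls back to a synthesized `help` command, and threads the accumulated command name through `this.command` so nested commands can print accurate usage strings. A simplified sketch of that contract, with an inline stub standing in for the generated map:

```js
// stand-in for the map generated from src/commands/ by index-modules
const commands = {
  help () {
    return `Usage: ${this.command} <command>`
  },
  info (args) {
    return `${this.command} called with ${args.join(' ')}`
  },
}

function runCommand (commands, [command = 'help', ...args]) {
  const fn = commands[command]
  // `this.command` grows as we descend, e.g. 'vhd-cli' → 'vhd-cli info'
  return fn.call({ __proto__: this, command: `${this.command} ${command}` }, args)
}

console.log(runCommand.call({ command: 'vhd-cli' }, commands, ['info', 'a.vhd']))
// → 'vhd-cli info called with a.vhd'
```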
28  packages/vhd-cli/src/info.integ.spec.js  Normal file

@@ -0,0 +1,28 @@
/* eslint-env jest */

import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'

import command from './commands/info'

const initialDir = process.cwd()

jest.setTimeout(10000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('can run the command', async () => {
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
  await command(['empty.vhd'])
})
@@ -1,461 +0,0 @@
import assert from 'assert'
import fu from 'struct-fu'
import { dirname } from 'path'

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

/* eslint-disable no-unused-vars */

const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512

/* eslint-enable no-unused-vars */

// ===================================================================

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  fu.struct('dataOffset', [
    fu.uint32('high'), // 16
    fu.uint32('low'), // 20
  ]),
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  fu.struct('originalSize', [
    // At the creation, current size of the hard disk.
    fu.uint32('high'), // 40
    fu.uint32('low'), // 44
  ]),
  fu.struct('currentSize', [
    // Current size of the virtual disk. At the creation: currentSize = originalSize.
    fu.uint32('high'), // 48
    fu.uint32('low'), // 52
  ]),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85
  fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
  fu.struct('tableOffset', [
    // Absolute byte offset of the Block Allocation Table.
    fu.uint32('high'),
    fu.uint32('low'),
  ]),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.byte('reserved1', 4),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      fu.struct('platformDataOffset', [
        // Absolute byte offset of the locator data.
        fu.uint32('high'),
        fu.uint32('low'),
      ]),
    ],
    8
  ),
  fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size

// ===================================================================
// Helpers
// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)

// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
  map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
  map[bit >> 3] &= ~(1 << (bit & 7))
}

const addOffsets = (...offsets) =>
  offsets.reduce(
    (a, b) =>
      b == null
        ? a
        : typeof b === 'object'
          ? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
          : { bytes: a.bytes + b, bits: a.bits },
    { bytes: 0, bits: 0 }
  )

const pack = (field, value, buf, offset) => {
  field.pack(value, buf, addOffsets(field.offset, offset))
}

const unpack = (field, buf, offset) =>
  field.unpack(buf, addOffsets(field.offset, offset))

// ===================================================================

const streamToNewBuffer = stream =>
  new Promise((resolve, reject) => {
    const chunks = []
    let length = 0

    const onData = chunk => {
      chunks.push(chunk)
      length += chunk.length
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(Buffer.concat(chunks, length))
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

const streamToExistingBuffer = (
  stream,
  buffer,
  offset = 0,
  end = buffer.length
) =>
  new Promise((resolve, reject) => {
    assert(offset >= 0)
    assert(end > offset)
    assert(end <= buffer.length)

    let i = offset

    const onData = chunk => {
      const prev = i
      i += chunk.length

      if (i > end) {
        return onError(new Error('too much data'))
      }

      chunk.copy(buffer, prev)
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(i - offset)
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

// ===================================================================

// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumField = struct.fields.checksum
  const checksumOffset = offset + checksumField.offset
  for (let i = offset, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = offset + struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  return ~sum >>> 0
}

const verifyChecksum = (struct, buf, offset) =>
  unpack(struct.fields.checksum, buf, offset) ===
  computeChecksum(struct, buf, offset)

const getParentLocatorSize = parentLocatorEntry => {
  const { platformDataSpace } = parentLocatorEntry

  if (platformDataSpace < SECTOR_SIZE) {
    return platformDataSpace * SECTOR_SIZE
  }

  return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}

// ===================================================================

// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]

export default class Vhd {
  constructor (handler, path) {
    this._handler = handler
    this._path = path

    this._blockAllocationTable = null
    this._blockBitmapSize = null
    this._footer = null
    this._header = null
    this._parent = null
    this._sectorsPerBlock = null
  }

  // Read `length` bytes starting from `begin`.
  //
  // - if `buffer`: it is filled starting from `offset`, and the
  //   number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _read (begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    return this._handler
      .createReadStream(this._path, {
        end: begin + length - 1,
        start: begin,
      })
      .then(
        buf
          ? stream =>
            streamToExistingBuffer(
              stream,
              buf,
              offset,
              (offset || 0) + length
            )
          : streamToNewBuffer
      )
  }

  // - if `buffer`: it is filled with 0 starting from `offset`, and
  //   the number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _zeroes (length, buf, offset = 0) {
    if (buf) {
      assert(offset >= 0)
      assert(length > 0)

      const end = offset + length
      assert(end <= buf.length)

      buf.fill(0, offset, end)
      return Promise.resolve(length)
    }

    return Promise.resolve(Buffer.alloc(length))
  }

  // Return the position of a block in the VHD or undefined if not found.
  _getBlockAddress (block) {
    assert(block >= 0)
    assert(block < this._header.maxTableEntries)

    const blockAddr = this._blockAllocationTable[block]
    if (blockAddr !== 0xffffffff) {
      return blockAddr * SECTOR_SIZE
    }
  }

  // -----------------------------------------------------------------

  async readHeaderAndFooter () {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)

    if (!verifyChecksum(fuFooter, buf)) {
      throw new Error('footer checksum does not match')
    }

    if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
      throw new Error('header checksum does not match')
    }

    return this._initMetadata(
      unpack(fuHeader, buf, FOOTER_SIZE),
      unpack(fuFooter, buf)
    )
  }

  async _initMetadata (header, footer) {
    const sectorsPerBlock = header.blockSize / SECTOR_SIZE
    assert(sectorsPerBlock % 1 === 0)

    // 1 bit per sector, rounded up to full sectors
    this._blockBitmapSize =
      Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
    assert(this._blockBitmapSize === SECTOR_SIZE)

    this._footer = footer
    this._header = header
    this.size = uint32ToUint64(this._footer.currentSize)

    if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
      const parent = new Vhd(
        this._handler,
        `${dirname(this._path)}/${header.parentUnicodeName}`
      )
      await parent.readHeaderAndFooter()
      await parent.readBlockAllocationTable()

      this._parent = parent
    }
  }

  // -----------------------------------------------------------------

  async readBlockAllocationTable () {
    const { maxTableEntries, tableOffset } = this._header
    const fuTable = fu.uint32(maxTableEntries)

    this._blockAllocationTable = unpack(
      fuTable,
      await this._read(uint32ToUint64(tableOffset), fuTable.size)
    )
  }

  // -----------------------------------------------------------------

  // read a single sector in a block
  async _readBlockSector (block, sector, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)
    assert(begin + length <= SECTOR_SIZE)

    const blockAddr = this._getBlockAddress(block)
    const blockBitmapSize = this._blockBitmapSize
    const parent = this._parent

    if (
      blockAddr &&
      (!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
    ) {
      return this._read(
        blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
        length,
        buf,
        offset
      )
    }

    return parent
      ? parent._readBlockSector(block, sector, begin, length, buf, offset)
      : this._zeroes(length, buf, offset)
  }

  _readBlock (block, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    const { blockSize } = this._header
    assert(begin + length <= blockSize)

    const blockAddr = this._getBlockAddress(block)
    const parent = this._parent

    if (!blockAddr) {
      return parent
        ? parent._readBlock(block, begin, length, buf, offset)
        : this._zeroes(length, buf, offset)
    }

    if (!parent) {
      return this._read(
        blockAddr + this._blockBitmapSize + begin,
        length,
        buf,
        offset
      )
    }

    // FIXME: we should read as many sectors in a single pass as
    // possible for maximum perf.
    const [sector, beginInSector] = div(begin, SECTOR_SIZE)
    return this._readBlockSector(
      block,
      sector,
      beginInSector,
      Math.min(length, SECTOR_SIZE - beginInSector),
      buf,
      offset
    )
  }

  read (buf, begin, length = buf.length, offset) {
    assert(Buffer.isBuffer(buf))
    assert(begin >= 0)

    const { size } = this
    if (begin >= size) {
      return Promise.resolve(0)
    }

    const { blockSize } = this._header
    const [block, beginInBlock] = div(begin, blockSize)

    return this._readBlock(
      block,
      beginInBlock,
      Math.min(length, blockSize - beginInBlock, size - begin),
      buf,
      offset
    )
  }
}
3  packages/vhd-lib/.babelrc.js  Normal file

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
56  packages/vhd-lib/package.json  Normal file

@@ -0,0 +1,56 @@
{
  "name": "vhd-lib",
  "version": "0.0.0",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "@babel/runtime": "^7.0.0-beta.44",
    "@xen-orchestra/fs": "^0.0.0",
    "async-iterator-to-stream": "^1.0.2",
    "execa": "^0.10.0",
    "from2": "^2.3.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "limit-concurrency-decorator": "^0.4.0",
    "promise-toolbox": "^0.9.5",
    "struct-fu": "^1.2.0",
    "uuid": "^3.0.1",
    "tmp": "^0.0.33"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "fs-promise": "^2.0.0",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}
7  packages/vhd-lib/src/_bitmap.js  Normal file

@@ -0,0 +1,7 @@
const MASK = 0x80

export const set = (map, bit) => {
  map[bit >> 3] |= MASK >> (bit & 7)
}

export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
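These two helpers address bits MSB-first within each byte (bit 0 is the 0x80 bit of byte 0), which is the layout of VHD sector bitmaps. A quick illustration of the addressing:

```js
import { set, test } from './_bitmap'

const bitmap = Buffer.alloc(2) // 16 bits, all clear

set(bitmap, 0) // 0x80 of byte 0
set(bitmap, 9) // 0x40 of byte 1

console.log(bitmap)          // <Buffer 80 40>
console.log(test(bitmap, 9)) // true
console.log(test(bitmap, 8)) // false
```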
37  packages/vhd-lib/src/_computeGeometryForSize.js  Normal file

@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'

export default function computeGeometryForSize (size) {
  const totalSectors = Math.ceil(size / 512)
  let sectorsPerTrackCylinder
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrackCylinder = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
  } else {
    sectorsPerTrackCylinder = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrackCylinder = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrackCylinder = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
  }
  const cylinders = Math.ceil(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
  return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
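This is the CHS algorithm from the appendix of the VHD specification; note that `actualSize` rounds the requested size up to a whole number of cylinders, so it can slightly exceed `size`. For example (values traced by hand through the code above):

```js
import computeGeometryForSize from './_computeGeometryForSize'

const g = computeGeometryForSize(1024 * 1024 * 1024) // 1 GiB
console.log(g.cylinders, g.heads, g.sectorsPerTrackCylinder) // 2081 16 63
console.log(g.actualSize) // 1073995776, > 2^30: rounded up to whole cylinders
```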
30  packages/vhd-lib/src/_constants.js  Normal file

@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff

// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '

// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec

export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'

export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4

export const PARENT_LOCATOR_ENTRIES = 8

export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
56  packages/vhd-lib/src/_createFooterHeader.js  Normal file

@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'

import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
  CREATOR_APPLICATION,
  DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
  DISK_TYPE_FIXED,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PLATFORM_WI2K,
} from './_constants'

export function createFooter (
  size,
  timestamp,
  geometry,
  dataOffset,
  diskType = DISK_TYPE_FIXED
) {
  const footer = fuFooter.pack({
    cookie: FOOTER_COOKIE,
    features: 2,
    fileFormatVersion: FILE_FORMAT_VERSION,
    dataOffset,
    timestamp,
    creatorApplication: CREATOR_APPLICATION,
    creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
    originalSize: size,
    currentSize: size,
    diskGeometry: geometry,
    diskType,
    uuid: generateUuid(null, []),
  })
  checksumStruct(footer, fuFooter)
  return footer
}

export function createHeader (
  maxTableEntries,
  tableOffset = HEADER_SIZE + FOOTER_SIZE,
  blockSize = VHD_BLOCK_SIZE_BYTES
) {
  const header = fuHeader.pack({
    cookie: HEADER_COOKIE,
    tableOffset,
    headerVersion: HEADER_VERSION,
    maxTableEntries,
    blockSize,
  })
  checksumStruct(header, fuHeader)
  return header
}
121  packages/vhd-lib/src/_structs.js  Normal file

@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'

import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'

const SIZE_OF_32_BITS = Math.pow(2, 32)

const uint64 = fu.derive(
  fu.uint32(2),
  number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ => _[0] * SIZE_OF_32_BITS + _[1]
)
const uint64Undefinable = fu.derive(
  fu.uint32(2),
  number =>
    number === undefined
      ? [0xffffffff, 0xffffffff]
      : [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ =>
    _[0] === 0xffffffff && _[1] === 0xffffffff
      ? undefined
      : _[0] * SIZE_OF_32_BITS + _[1]
)

export const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  uint64Undefinable('dataOffset'), // offset of the header
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  uint64('originalSize'),
  uint64('currentSize'),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85 TODO: should probably be merged in reserved
  fu.char('reserved', 426), // 86
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)

export const fuHeader = fu.struct([
  fu.char('cookie', 8),
  uint64Undefinable('dataOffset'),
  uint64('tableOffset'),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.uint32('reserved1'),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
    ],
    PARENT_LOCATOR_ENTRIES
  ),
  fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)

export const packField = (field, value, buf) => {
  const { offset } = field

  field.pack(
    value,
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

export const unpackField = (field, buf) => {
  const { offset } = field

  return field.unpack(
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
  const checksumField = struct.fields.checksum
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumOffset = checksumField.offset
  for (let i = 0, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  sum = ~sum >>> 0

  // Write new sum.
  packField(checksumField, sum, buf)

  return sum
}
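`checksumStruct` stores the VHD checksum in place: the one's complement of the byte-wise sum of the struct, with the checksum field itself excluded from the sum — which is also why recomputing over an already-checksummed buffer is stable. A small round-trip check using the sibling modules shown above:

```js
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
import { checksumStruct, fuFooter, unpackField } from './_structs'

const geometry = computeGeometryForSize(1024 ** 3)
const footer = createFooter(
  geometry.actualSize,
  Math.floor(Date.now() / 1000),
  geometry
)

// the checksum written by createFooter matches a recomputation
const stored = unpackField(fuFooter.fields.checksum, footer)
console.log(stored === checksumStruct(footer, fuFooter)) // true
```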
37  packages/vhd-lib/src/chain.js  Normal file

@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'

export default async function chain (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockAllocationTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  const parentName = relative(dirname(childPath), parentPath)
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName
  await childVhd.setUniqueParentLocator(parentName)
  await childVhd.writeHeader()
  await childVhd.writeFooter()
}
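`chain` re-parents an existing child VHD: it forces the disk type to differencing (only with `force`), points the parent UUID/name fields at the parent, sets a parent locator, and rewrites the header and footer in place. A hypothetical invocation with a local handler (the paths are illustrative; both files must already be valid VHDs):

```js
import chain from './chain'
import { getHandler } from '@xen-orchestra/fs'

const handler = getHandler({ url: 'file:///' })

// make /tmp/child.vhd a differencing disk whose parent is /tmp/parent.vhd
chain(handler, '/tmp/parent.vhd', handler, '/tmp/child.vhd').catch(console.error)
```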
42  packages/vhd-lib/src/createReadableRawStream.js  Normal file

@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'

export default asyncIteratorToStream(async function * (size, blockParser) {
  const geometry = computeGeometryForSize(size)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry
  )
  let position = 0

  function * filePadding (paddingLength) {
    if (paddingLength > 0) {
      const chunkSize = 1024 * 1024 // 1 MB
      for (
        let paddingPosition = 0;
        paddingPosition + chunkSize < paddingLength;
        paddingPosition += chunkSize
      ) {
        yield Buffer.alloc(chunkSize)
      }
      yield Buffer.alloc(paddingLength % chunkSize)
    }
  }

  let next
  while ((next = await blockParser.next()) !== null) {
    const paddingLength = next.offsetBytes - position
    if (paddingLength < 0) {
      throw new Error('Received out of order blocks')
    }
    yield * filePadding(paddingLength)
    yield next.data
    position = next.offsetBytes + next.data.length
  }
  yield * filePadding(actualSize - position)
  yield footer
})
143
packages/vhd-lib/src/createReadableSparseStream.js
Normal file
143
packages/vhd-lib/src/createReadableSparseStream.js
Normal file
@@ -0,0 +1,143 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
|
||||
import computeGeometryForSize from './_computeGeometryForSize'
|
||||
import { createFooter, createHeader } from './_createFooterHeader'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FOOTER_SIZE,
|
||||
HEADER_SIZE,
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
import { set as setBitmap } from './_bitmap'
|
||||
|
||||
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
|
||||
|
||||
/**
|
||||
 * @returns {Array} an array of occupation bitmaps, one entry per VHD block, each bit marking an occupied input block
 */
function createBAT (
  firstBlockPosition,
  blockAddressList,
  ratio,
  bat,
  bitmapSize
) {
  const vhdOccupationTable = []
  let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
  blockAddressList.forEach(blockPosition => {
    const scaled = blockPosition / VHD_BLOCK_SIZE_BYTES
    const vhdTableIndex = Math.floor(scaled)
    if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
      bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
      currentVhdPositionSector +=
        (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
    }
    // not using bit operators to avoid the int32 coercion, that way we can go to 53 bits
    vhdOccupationTable[vhdTableIndex] =
      (vhdOccupationTable[vhdTableIndex] || 0) +
      Math.pow(2, (scaled % 1) * ratio)
  })
  return vhdOccupationTable
}

function createBitmap (bitmapSize, ratio, vhdOccupationBucket) {
  const bitmap = Buffer.alloc(bitmapSize)
  for (let i = 0; i < VHD_BLOCK_SIZE_SECTORS / ratio; i++) {
    // do not shift to avoid int32 coercion
    if ((vhdOccupationBucket * Math.pow(2, -i)) & 1) {
      for (let j = 0; j < ratio; j++) {
        setBitmap(bitmap, i * ratio + j)
      }
    }
  }
  return bitmap
}

function * yieldIfNotEmpty (buffer) {
  if (buffer.length > 0) {
    yield buffer
  }
}

async function * generateFileContent (
  blockIterator,
  bitmapSize,
  ratio,
  vhdOccupationTable
) {
  let currentVhdBlockIndex = -1
  let currentBlockBuffer = Buffer.alloc(0)
  for await (const next of blockIterator) {
    const batEntry = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
    if (batEntry !== currentVhdBlockIndex) {
      yield * yieldIfNotEmpty(currentBlockBuffer)
      currentBlockBuffer = Buffer.alloc(VHD_BLOCK_SIZE_BYTES)
      currentVhdBlockIndex = batEntry
      yield createBitmap(bitmapSize, ratio, vhdOccupationTable[batEntry])
    }
    next.data.copy(currentBlockBuffer, next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
  }
  yield * yieldIfNotEmpty(currentBlockBuffer)
}

export default asyncIteratorToStream(async function * (
  diskSize,
  incomingBlockSize,
  blockAddressList,
  blockIterator
) {
  const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
  if (ratio % 1 !== 0) {
    throw new Error(
      `Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
    )
  }
  if (ratio > 53) {
    throw new Error(
      `Can't import file, grain size / block size ratio is > 53 (${ratio})`
    )
  }

  const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
  const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512

  const batPosition = FOOTER_SIZE + HEADER_SIZE
  const firstBlockPosition = batPosition + tablePhysicalSizeBytes
  const geometry = computeGeometryForSize(diskSize)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry,
    FOOTER_SIZE,
    DISK_TYPE_DYNAMIC
  )
  const header = createHeader(
    maxTableEntries,
    batPosition,
    VHD_BLOCK_SIZE_BYTES
  )
  const bitmapSize =
    Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
  const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
  const vhdOccupationTable = createBAT(
    firstBlockPosition,
    blockAddressList,
    ratio,
    bat,
    bitmapSize
  )
  yield footer
  yield header
  yield bat
  yield * generateFileContent(
    blockIterator,
    bitmapSize,
    ratio,
    vhdOccupationTable
  )
  yield footer
})
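
A note on the encoding used by createBAT above: each entry of vhdOccupationTable is a plain JavaScript number used as a bit set, built with Math.pow instead of bit operators so it stays exact up to 2^53 (hence the ratio > 53 guard). A minimal standalone sketch of the same trick (function names hypothetical):

// Sketch: treat a double as a bit set without int32 coercion.
// markSubBlock assumes each sub-block is marked at most once, as in createBAT.
const markSubBlock = (bucket, i) => (bucket || 0) + Math.pow(2, i)
const isSubBlockSet = (bucket, i) => (bucket * Math.pow(2, -i)) & 1

let bucket = 0
bucket = markSubBlock(bucket, 0)
bucket = markSubBlock(bucket, 2)
console.log(isSubBlockSet(bucket, 0), isSubBlockSet(bucket, 1), isSubBlockSet(bucket, 2))
// → 1 0 1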
153
packages/vhd-lib/src/createSyntheticStream.js
Normal file
@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'

import Vhd from './vhd'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
  HEADER_SIZE,
  SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'

const resolveRelativeFromFile = (file, path) =>
  resolve('/', dirname(file), path).slice(1)

export default asyncIteratorToStream(function * (handler, path) {
  const fds = []

  try {
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockAllocationTable()

      if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
        break
      }

      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length

    // this is the VHD we want to synthesize
    const vhd = vhds[0]

    // this is the root VHD
    const rootVhd = vhds[nVhds - 1]

    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: FOOTER_SIZE + HEADER_SIZE,
      parentTimestamp: rootVhd.header.parentTimestamp,
      parentUnicodeName: rootVhd.header.parentUnicodeName,
      parentUuid: rootVhd.header.parentUuid,
    }

    const bat = Buffer.allocUnsafe(vhd.batSize)
    let footer = {
      ...vhd.footer,
      dataOffset: FOOTER_SIZE,
      diskType: rootVhd.footer.diskType,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE

    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil(
          (header.tableOffset + bat.length) / SECTOR_SIZE
        );
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer

    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header

    yield bat

    // TODO: for generic usage the bitmap needs to be properly computed for each block
    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }

      yield bitmap

      const blocksByVhd = new Map()
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd === rootVhd
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
          return
        }
        while (i < n) {
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
    }

    yield footer
  } finally {
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})
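
For orientation: this generator walks the chain from the given leaf up to its dynamic root and emits a single flat dynamic VHD on the fly. A usage sketch mirroring the integration test further down (handler construction assumed from @xen-orchestra/fs):

// Sketch: export a VHD chain, starting from its leaf, as one flat file.
import { createWriteStream } from 'fs'
import { fromEvent } from 'promise-toolbox'
import { getHandler } from '@xen-orchestra/fs'
import { createSyntheticStream } from 'vhd-lib'

async function flattenChain (leafPath) {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createSyntheticStream(handler, leafPath)
  await fromEvent(stream.pipe(createWriteStream('flat.vhd')), 'finish')
}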
8
packages/vhd-lib/src/index.js
Normal file
@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
  default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
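
Put together, a consumer sees the new package surface like this (sketch; the package name is taken from the paths above):

// Sketch: what vhd-lib exports, per the index above.
import Vhd, {
  chainVhd,
  createReadableRawStream,
  createReadableSparseStream,
  createSyntheticStream,
  mergeVhd,
} from 'vhd-lib'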
@@ -2,25 +2,25 @@

import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent } from 'promise-toolbox'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'

import LocalHandler from './remote-handlers/local'
import vhdMerge, {
  chainVhd,
  createReadStream,
  Vhd,
  VHD_SECTOR_SIZE,
} from './vhd-merge'
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'

const initialDir = process.cwd()

jest.setTimeout(60000)

beforeEach(async () => {
  const dir = await tmpDir()
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

@@ -57,11 +57,11 @@ test('blocks can be moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  await newVhd._freeFirstBlockSpace(8000000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
@@ -70,20 +70,18 @@ test('blocks can be moved', async () => {
})

test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb =>
    randomBytes(VHD_SECTOR_SIZE, cb)
  )
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const vhd = new Vhd(handler, 'empty.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  await vhd.readBlockAllocationTable()
  // we want the bit 31 to be on, to prove it's not been used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile('empty.vhd')
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)

  // we recover the data manually for speed reasons.
@@ -93,7 +91,7 @@ test('the BAT MSB is not used for sign', async () => {
  try {
    const vhd2 = new Vhd(handler, 'empty.vhd')
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockTable()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
      const entry = vhd._getBatEntry(i)
      if (entry !== 0xffffffff) {
@@ -110,7 +108,7 @@ test('the BAT MSB is not used for sign', async () => {
  } finally {
    fs.close(recoveredFile)
  }
  const recovered = await streamToBuffer(
  const recovered = await getStream.buffer(
    await fs.createReadStream('recovered', {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
@@ -124,11 +122,11 @@ test('writeData on empty file', async () => {
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(0, randomData)
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
@@ -139,11 +137,11 @@ test('writeData in 2 non-overlaping operations', async () => {
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  const splitPointSectors = 2
  await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
  await newVhd.writeData(
@@ -159,11 +157,11 @@ test('writeData in 2 overlaping operations', async () => {
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  const endFirstWrite = 3
  const startSecondWrite = 2
  await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
@@ -179,11 +177,11 @@ test('BAT can be extended and blocks moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
@@ -203,7 +201,7 @@ test('coalesce works with empty parent files', async () => {
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
@@ -226,11 +224,11 @@ test('coalesce works in normal cases', async () => {
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  await vhd.readBlockAllocationTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()

@@ -242,7 +240,7 @@ test('coalesce works in normal cases', async () => {
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
@@ -261,7 +259,7 @@ test('coalesce works in normal cases', async () => {
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
@@ -270,15 +268,16 @@ test('coalesce works in normal cases', async () => {
  )
})

test('createReadStream passes vhd-util check', async () => {
test('createSyntheticStream passes vhd-util check', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createReadStream(handler, 'randomfile.vhd')
  await fromEvent(
    stream.pipe(await fs.createWriteStream('recovered.vhd')),
    'finish'
  )
  await checkFile('recovered.vhd')
  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})
77
packages/vhd-lib/src/merge.js
Normal file
@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'

// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      const parentDiskType = parentVhd.footer.diskType
      assert(
        parentDiskType === DISK_TYPE_DIFFERENCING ||
          parentDiskType === DISK_TYPE_DYNAMIC
      )
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)

      // Read allocation table of child/parent.
      await Promise.all([
        parentVhd.readBlockAllocationTable(),
        childVhd.readBlockAllocationTable(),
      ])

      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }
})
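
A plausible call site for this merge (sketch; handlers and paths are placeholders): per the assertions above, the parent must be a dynamic or differencing disk, the child a differencing disk, and the resolved value is the number of merged data bytes.

// Sketch: coalesce child.vhd into its parent in place.
import { getHandler } from '@xen-orchestra/fs'
import { mergeVhd } from 'vhd-lib'

async function coalesce () {
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const merged = await mergeVhd(handler, 'parent.vhd', handler, 'child.vhd')
  console.log(`${merged} bytes of block data merged`)
}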
134
packages/vhd-lib/src/vhd.integ.spec.js
Normal file
@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'

import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('createFooter() does not crash', () => {
  createFooter(104448, Math.floor(Date.now() / 1000), {
    cylinders: 3,
    heads: 4,
    sectorsPerTrack: 17,
  })
})

test('ReadableRawVHDStream does not crash', async () => {
  const data = [
    {
      offsetBytes: 100,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 700,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  const fileSize = 1000
  const stream = createReadableRawVHDStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {
  const data = [
    {
      offsetBytes: 700,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 100,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  return expect(
    new Promise((resolve, reject) => {
      const stream = createReadableRawVHDStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
  ).rejects.toThrow('Received out of order blocks')
})

test('ReadableSparseVHDStream can handle a sparse file', async () => {
  const blockSize = Math.pow(2, 16)
  const blocks = [
    {
      offsetBytes: blockSize * 3,
      data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: blockSize * 5,
      data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
    },
  ]
  const fileSize = blockSize * 10
  const stream = createReadableSparseVHDStream(
    fileSize,
    blockSize,
    [100, 700],
    blocks
  )
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
  ])
  const out1 = await readFile('out1.raw')
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)
  })
  await expect(out1.slice(0, expected.length)).toEqual(expected)
})
@@ -1,18 +1,30 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import concurrency from 'limit-concurrency-decorator'
import fu from 'struct-fu'
import { dirname, relative } from 'path'
import getStream from 'get-stream'
import { fromEvent } from 'promise-toolbox'

import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DIFFERENCING,
  DISK_TYPE_DYNAMIC,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PARENT_LOCATOR_ENTRIES,
  PLATFORM_NONE,
  PLATFORM_W2KU,
  SECTOR_SIZE,
} from './_constants'

const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
const debug = VHD_UTIL_DEBUG
  ? str => console.log(`[vhd-merge]${str}`)
  : () => null

// ===================================================================
//
@@ -24,160 +36,12 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
//
// ===================================================================

// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
export const VHD_SECTOR_SIZE = 512

// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4

const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0

// Types of backup treated. Others are not supported.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.

export const PLATFORM_NONE = 0
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

// Other.
const BLOCK_UNUSED = 0xffffffff
const BIT_MASK = 0x80

// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)

// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
  fu.uint32(2),
  number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ => _[0] * SIZE_OF_32_BITS + _[1]
)

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  uint64('dataOffset'), // offset of the header, should always be 512
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  uint64('originalSize'),
  uint64('currentSize'),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85
  fu.char('reserved', 426), // 86
])

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.uint8('dataOffsetUnused', 8),
  uint64('tableOffset'),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.uint32('reserved1'),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
    ],
    VHD_PARENT_LOCATOR_ENTRIES
  ),
  fu.char('reserved2', 256),
])

// ===================================================================
// Helpers
// ===================================================================

const computeBatSize = entries =>
  sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))

// Returns a 32-bit integer corresponding to a VHD version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
  sectorsToBytes(sectorsRoundUpNoZero(entries * 4))

// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE

// Check/Set a bit on a vhd map.
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
const mapSetBit = (map, bit) => {
  map[bit >> 3] |= BIT_MASK >> (bit & 7)
}

const packField = (field, value, buf) => {
  const { offset } = field

  field.pack(
    value,
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

const unpackField = (field, buf) => {
  const { offset } = field

  return field.unpack(
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}
// ===================================================================

// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (buf, struct) {
  const checksumField = struct.fields.checksum
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumOffset = checksumField.offset
  for (let i = 0, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  sum = ~sum >>> 0

  // Write new sum.
  packField(checksumField, sum, buf)

  return sum
}
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE

const assertChecksum = (name, buf, struct) => {
  const actual = unpackField(struct.fields.checksum, buf)
@@ -187,6 +51,10 @@ const assertChecksum = (name, buf, struct) => {
  }
}

// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)

// ===================================================================

// Format:
@@ -210,7 +78,8 @@ const assertChecksum = (name, buf, struct) => {
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
export class Vhd {

export default class Vhd {
  get batSize () {
    return computeBatSize(this.header.maxTableEntries)
  }
@@ -232,7 +101,12 @@ export class Vhd {
  }

  _read (start, n) {
    return this._readStream(start, n).then(streamToBuffer)
    return this._readStream(start, n)
      .then(getStream.buffer)
      .then(buf => {
        assert.equal(buf.length, n)
        return buf
      })
  }

  containsBlock (id) {
@@ -243,15 +117,15 @@ export class Vhd {
  getEndOfHeaders () {
    const { header } = this

    let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
    let end = FOOTER_SIZE + HEADER_SIZE

    // Max(end, block allocation table end)
    end = Math.max(end, header.tableOffset + this.batSize)

    for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
    for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
      const entry = header.parentLocatorEntry[i]

      if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
      if (entry.platformCode !== PLATFORM_NONE) {
        end = Math.max(
          end,
          entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
@@ -266,7 +140,7 @@ export class Vhd {

  // Returns the first sector after data.
  getEndOfData () {
    let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
    let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)

    const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
    const { maxTableEntries } = this.header
@@ -283,25 +157,46 @@ export class Vhd {
    return sectorsToBytes(end)
  }

  // Get the beginning (footer + header) of a vhd file.
  async readHeaderAndFooter () {
    const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
    const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
    const bufHeader = buf.slice(VHD_FOOTER_SIZE)
  // TODO: extract the checks into reusable functions:
  // - better human reporting
  // - auto repair if possible
  async readHeaderAndFooter (checkSecondFooter = true) {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
    const bufFooter = buf.slice(0, FOOTER_SIZE)
    const bufHeader = buf.slice(FOOTER_SIZE)

    assertChecksum('footer', bufFooter, fuFooter)
    assertChecksum('header', bufHeader, fuHeader)

    if (checkSecondFooter) {
      const size = await this._handler.getSize(this._path)
      assert(
        bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
        'footer1 !== footer2'
      )
    }

    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
    assert(footer.originalSize <= footer.currentSize)
    assert(
      footer.diskType === DISK_TYPE_DIFFERENCING ||
        footer.diskType === DISK_TYPE_DYNAMIC
    )

    const header = (this.header = fuHeader.unpack(bufHeader))
    assert.strictEqual(header.cookie, HEADER_COOKIE)
    assert.strictEqual(header.dataOffset, undefined)
    assert.strictEqual(header.headerVersion, HEADER_VERSION)
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
    const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
      header.blockSize / VHD_SECTOR_SIZE
    ))
    const sectorsPerBlock = (this.sectorsPerBlock =
      header.blockSize / SECTOR_SIZE)

    // Compute bitmap size in sectors.
    // Default: 1.
@@ -317,23 +212,18 @@ export class Vhd {
    this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
  }

  // Check if a vhd object has a block allocation table.
  hasBlockAllocationTableMap () {
    return this.footer.fileFormatVersion > getVhdVersion(1, 0)
  }

  // Returns a buffer that contains the block allocation table of a vhd file.
  async readBlockTable () {
  async readBlockAllocationTable () {
    const { header } = this
    this.blockTable = await this._read(
      header.tableOffset,
      header.maxTableEntries * VHD_ENTRY_SIZE
      header.maxTableEntries * 4
    )
  }

  // return the first sector (bitmap) of a block
  _getBatEntry (block) {
    return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
    return this.blockTable.readUInt32BE(block * 4)
  }

  _readBlock (blockId, onlyBitmap = false) {
@@ -371,7 +261,7 @@ export class Vhd {
    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += VHD_ENTRY_SIZE
      j += 4

      if (i === n) {
        const error = new Error('no allocated block found')
@@ -395,7 +285,7 @@ export class Vhd {
    }

    i += 1
    j += VHD_ENTRY_SIZE
    j += 4
  }

  return { first, firstSector, last, lastSector }
@@ -431,7 +321,7 @@ export class Vhd {
    const tableOffset = this.header.tableOffset
    const { batSize } = this
    const newMinSector = Math.ceil(
      (tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
      (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
    )
    if (
      tableOffset + batSize + spaceNeededBytes >=
@@ -439,7 +329,7 @@ export class Vhd {
    ) {
      const { fullBlockSize } = this
      const newFirstSector = Math.max(
        lastSector + fullBlockSize / VHD_SECTOR_SIZE,
        lastSector + fullBlockSize / SECTOR_SIZE,
        newMinSector
      )
      debug(
@@ -478,7 +368,7 @@ export class Vhd {
      const prevBat = this.blockTable
      const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
      prevBat.copy(bat)
      bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
      bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
      debug(
        `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
      )
@@ -491,21 +381,18 @@ export class Vhd {

  // set the first sector (bitmap) of a block
  _setBatEntry (block, blockSector) {
    const i = block * VHD_ENTRY_SIZE
    const i = block * 4
    const { blockTable } = this

    blockTable.writeUInt32BE(blockSector, i)

    return this._write(
      blockTable.slice(i, i + VHD_ENTRY_SIZE),
      this.header.tableOffset + i
    )
    return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
  }

  // Make a new empty block at vhd end.
  // Update block allocation table in context and in file.
  async createBlock (blockId) {
    const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
    const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)

    debug(`create block ${blockId} at ${blockAddr}`)

@@ -647,7 +534,7 @@ export class Vhd {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    const offset = VHD_FOOTER_SIZE
    const offset = FOOTER_SIZE
    debug(
      `Write header at: ${offset} (checksum=${
        header.checksum
@@ -657,12 +544,12 @@ export class Vhd {
  }

  async writeData (offsetSectors, buffer) {
    const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
    const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
    const endBufferSectors = offsetSectors + bufferSizeSectors
    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
    await this.ensureBatSize(lastBlock)
    const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
    const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock

@@ -681,11 +568,11 @@ export class Vhd {
      )
      const startInBuffer = Math.max(
        0,
        (currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
        (currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
      )
      const endInBuffer = Math.min(
        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
          VHD_SECTOR_SIZE,
          SECTOR_SIZE,
        buffer.length
      )
      let inputBuffer
@@ -695,7 +582,7 @@ export class Vhd {
        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
        buffer.copy(
          inputBuffer,
          offsetInBlockSectors * VHD_SECTOR_SIZE,
          offsetInBlockSectors * SECTOR_SIZE,
          startInBuffer,
          endInBuffer
        )
@@ -710,10 +597,10 @@ export class Vhd {
  }

  async ensureSpaceForParentLocators (neededSectors) {
    const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
    const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
    const currentSpace =
      Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
      firstLocatorOffset / VHD_SECTOR_SIZE
      Math.floor(this.header.tableOffset / SECTOR_SIZE) -
      firstLocatorOffset / SECTOR_SIZE
    if (currentSpace < neededSectors) {
      const deltaSectors = neededSectors - currentSpace
      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
@@ -722,279 +609,23 @@ export class Vhd {
    }
    return firstLocatorOffset
  }
}

// Merge vhd child into vhd parent.
//
// Child must be a delta backup !
// Parent must be a full backup !
//
// TODO: update the identifier of the parent VHD.
export default concurrency(2)(async function vhdMerge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      // Child must be a delta.
      if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
        throw new Error('Unable to merge, child is not a delta backup.')
      }

      // Allocation table map is not yet implemented.
      if (
        parentVhd.hasBlockAllocationTableMap() ||
        childVhd.hasBlockAllocationTableMap()
      ) {
        throw new Error('Unsupported allocation table map.')
      }

      // Read allocation table of child/parent.
      await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])

      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
  async setUniqueParentLocator (fileNameString) {
    const { header } = this
    header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
    const encodedFilename = Buffer.from(fileNameString, 'utf16le')
    const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
    const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
    await this._write(encodedFilename, position)
    header.parentLocatorEntry[0].platformDataSpace =
      dataSpaceSectors * SECTOR_SIZE
    header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
    header.parentLocatorEntry[0].platformDataOffset = position
    for (let i = 1; i < 8; i++) {
      header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
      header.parentLocatorEntry[i].platformDataSpace = 0
      header.parentLocatorEntry[i].platformDataLength = 0
      header.parentLocatorEntry[i].platformDataOffset = 0
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }
})

// returns true if the child was actually modified
export async function chainVhd (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = HARD_DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  const parentName = relative(dirname(childPath), parentPath)

  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName

  header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
  const encodedFilename = Buffer.from(parentName, 'utf16le')
  const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
  const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
  await childVhd._write(encodedFilename, position)
  header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
    dataSpaceSectors
  )
  header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
  header.parentLocatorEntry[0].platformDataOffset = position
  for (let i = 1; i < 8; i++) {
    header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
    header.parentLocatorEntry[i].platformDataSpace = 0
    header.parentLocatorEntry[i].platformDataLength = 0
    header.parentLocatorEntry[i].platformDataOffset = 0
  }
  await childVhd.writeHeader()
  await childVhd.writeFooter()
  return true
}

export const createReadStream = asyncIteratorToStream(function * (handler, path) {
  const fds = []

  try {
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockTable()

      if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
        break
      }

      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length

    // this is the VHD we want to synthesize
    const vhd = vhds[0]

    // this is the root VHD
    const rootVhd = vhds[nVhds - 1]

    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: 512 + 1024,
      parentTimestamp: rootVhd.header.parentTimestamp,
      parentUnicodeName: rootVhd.header.parentUnicodeName,
      parentUuid: rootVhd.header.parentUuid,
    }

    const bat = Buffer.allocUnsafe(
      Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
    )
    let footer = {
      ...vhd.footer,
      diskType: rootVhd.footer.diskType,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock =
      sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE

    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer

    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header

    yield bat

    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }

      yield bitmap

      const blocksByVhd = new Map()
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd === rootVhd
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * VHD_SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
          return
        }
        while (i < n) {
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlock)
    }

    yield footer
  } finally {
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})

export async function readVhdMetadata (handler: RemoteHandler, path: string) {
  const vhd = new Vhd(handler, path)
  await vhd.readHeaderAndFooter()
  return {
    footer: vhd.footer,
    header: vhd.header,
  }
}
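
Worth noting: the checksum logic retained in ./_structs (shown in the removed helpers above) is the standard VHD rule, a byte-wise sum over the whole struct minus the checksum field itself, one's-complemented. A self-contained restatement:

// Sketch: VHD checksum over a raw struct buffer; the checksum field is 4 bytes.
function vhdChecksum (buf, checksumOffset) {
  let sum = 0
  for (let i = 0; i < buf.length; ++i) {
    if (i < checksumOffset || i >= checksumOffset + 4) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0 // one's complement, forced to an unsigned 32-bit value
}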
@@ -1,6 +1,6 @@
{
  "name": "xo-server-perf-alert",
  "version": "0.0.0",
  "version": "0.1.0",
  "license": "AGPL-3.0",
  "description": "",
  "keywords": [],

@@ -1,6 +1,6 @@
{
  "name": "xo-server",
  "version": "5.18.3",
  "version": "5.19.3",
  "license": "AGPL-3.0",
  "description": "Server part of Xen-Orchestra",
  "keywords": [
@@ -34,6 +34,7 @@
  "@babel/polyfill": "7.0.0-beta.44",
  "@marsaud/smb2-promise": "^0.2.1",
  "@xen-orchestra/cron": "^1.0.3",
  "@xen-orchestra/fs": "^0.0.0",
  "ajv": "^6.1.1",
  "app-conf": "^0.5.0",
  "archiver": "^2.1.0",
@@ -59,6 +60,7 @@
  "fatfs": "^0.10.4",
  "from2": "^2.3.0",
  "fs-extra": "^5.0.0",
  "get-stream": "^3.0.0",
  "golike-defer": "^0.4.1",
  "hashy": "^0.6.2",
  "helmet": "^3.9.0",
@@ -109,6 +111,7 @@
  "tmp": "^0.0.33",
  "uuid": "^3.0.1",
  "value-matcher": "^0.2.0",
  "vhd-lib": "^0.0.0",
  "ws": "^5.0.0",
  "xen-api": "^0.16.9",
  "xml2js": "^0.4.19",
@@ -116,7 +119,8 @@
  "xo-collection": "^0.4.1",
  "xo-common": "^0.1.1",
  "xo-remote-parser": "^0.3",
  "xo-vmdk-to-vhd": "0.0.12"
  "xo-vmdk-to-vhd": "0.0.12",
  "yazl": "^2.4.3"
},
"devDependencies": {
  "@babel/cli": "7.0.0-beta.44",

@@ -1,3 +1,7 @@
import { basename } from 'path'

import { safeDateFormat } from '../utils'

export function createJob ({ schedules, ...job }) {
  job.userId = this.user.id
  return this.createBackupNgJob(job, schedules)
@@ -171,3 +175,88 @@ importVmBackup.params = {
    type: 'string',
  },
}

// -----------------------------------------------------------------------------

export function listPartitions ({ remote, disk }) {
  return this.listBackupNgDiskPartitions(remote, disk)
}

listPartitions.permission = 'admin'

listPartitions.params = {
  disk: {
    type: 'string',
  },
  remote: {
    type: 'string',
  },
}

export function listFiles ({ remote, disk, partition, path }) {
  return this.listBackupNgPartitionFiles(remote, disk, partition, path)
}

listFiles.permission = 'admin'

listFiles.params = {
  disk: {
    type: 'string',
  },
  partition: {
    type: 'string',
    optional: true,
  },
  path: {
    type: 'string',
  },
  remote: {
    type: 'string',
  },
}

async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
  const zipStream = await this.fetchBackupNgPartitionFiles(
    remote,
    disk,
    partition,
    paths
  )

  res.setHeader('content-disposition', 'attachment')
  res.setHeader('content-type', 'application/octet-stream')
  return zipStream
}

export async function fetchFiles (params) {
  const { paths } = params
  let filename = `restore_${safeDateFormat(new Date())}`
  if (paths.length === 1) {
    filename += `_${basename(paths[0])}`
  }
  filename += '.zip'

  return this.registerHttpRequest(handleFetchFiles, params, {
    suffix: encodeURI(`/${filename}`),
  }).then(url => ({ $getFrom: url }))
}

fetchFiles.permission = 'admin'

fetchFiles.params = {
  disk: {
    type: 'string',
  },
  partition: {
    optional: true,
    type: 'string',
  },
  paths: {
    items: { type: 'string' },
    minLength: 1,
    type: 'array',
  },
  remote: {
    type: 'string',
  },
}
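
These exports are registered as xo-server API methods; assuming they are mounted under a backupNg namespace (the mounting itself is not part of this diff), a client could drive them roughly like this with xo-lib (sketch; URL and credentials are placeholders):

// Sketch: listing partitions of a backed-up disk over the XO JSON-RPC API.
import Xo from 'xo-lib'

async function listBackupPartitions (remote, disk) {
  const xo = new Xo({ url: 'http://localhost' })
  await xo.open()
  await xo.signIn({ email: 'admin@admin.net', password: 'admin' })
  return xo.call('backupNg.listPartitions', { remote, disk })
}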
@@ -1,7 +1,6 @@
import getStream from 'get-stream'
import { forEach } from 'lodash'

import { streamToBuffer } from '../utils'

// ===================================================================

export function clean () {
@@ -61,7 +60,7 @@ getAllObjects.params = {
export async function importConfig () {
  return {
    $sendTo: await this.registerHttpRequest(async (req, res) => {
      await this.importConfig(JSON.parse(await streamToBuffer(req)))
      await this.importConfig(JSON.parse(await getStream.buffer(req)))

      res.end('config successfully imported')
    }),

@@ -1,16 +1,15 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
import { isArray, map } from 'lodash'

// ===================================================================

const parse = createParser({
  keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => (fields, ...args) =>
  execa
    .stdout(command, [
const makeFunction = command => async (fields, ...args) => {
  return splitLines(
    await execa.stdout(command, [
      '--noheading',
      '--nosuffix',
      '--nameprefixes',
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
      String(fields),
      ...args,
    ])
    .then(stdout =>
      map(
        splitLines(stdout),
        isArray(fields)
          ? parse
          : line => {
              const data = parse(line)
              return data[fields]
            }
      )
    )
  ).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')
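
For reference, with --nameprefixes the LVM tools print lines such as LVM2_LV_NAME='data' LVM2_LV_SIZE='10737418240'; the keyTransform above drops the 5-character LVM2_ prefix and lowercases the rest, so the rewritten makeFunction resolves roughly as follows (sketch with made-up output):

// Hypothetical resolution of the rewritten makeFunction:
// lvs(['lv_name', 'lv_size'], 'myVg') → [{ lv_name: 'data', lv_size: '10737418240' }]
// lvs('lv_name', 'myVg')              → ['data']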
@@ -1,28 +0,0 @@
const streamToNewBuffer = stream =>
  new Promise((resolve, reject) => {
    const chunks = []
    let length = 0

    const onData = chunk => {
      chunks.push(chunk)
      length += chunk.length
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(Buffer.concat(chunks, length))
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })
export { streamToNewBuffer as default }
|
||||
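The hand-rolled helper above is superseded by get-stream, already a dependency; a minimal sketch of the equivalence (get-stream@3 API):

import getStream from 'get-stream'

// getStream.buffer(stream) resolves with the stream's full contents as a
// single Buffer — the same contract as the removed streamToNewBuffer().
async function readJsonBody (req) {
  return JSON.parse(await getStream.buffer(req))
}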
@@ -55,10 +55,6 @@ export const asyncMap = (collection, iteratee) => {

// -------------------------------------------------------------------

export streamToBuffer from './stream-to-new-buffer'

// -------------------------------------------------------------------

export function camelToSnakeCase (string) {
  return string.replace(
    /([a-z0-9])([A-Z])/g,
@@ -1,72 +0,0 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'

async function testVhdMerge () {
  console.log('before merge')
  const moOfRandom = 4
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom}M < /dev/urandom >randomfile`,
  ])
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
  ])
  await execa('qemu-img', [
    'convert',
    '-f',
    'raw',
    '-Ovpc',
    'randomfile',
    'randomfile.vhd',
  ])
  await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
  await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
  // await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])

  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
  const childVhd = new Vhd(handler, 'randomfile.vhd')
  console.log('changing type')
  await childVhd.readHeaderAndFooter()
  console.log('child vhd', childVhd.footer.currentSize, originalSize)
  await childVhd.readBlockTable()
  childVhd.footer.diskType = 4 // Delta backup.
  await childVhd.writeFooter()
  console.log('chained')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  console.log('merged')
  const parentVhd = new Vhd(handler, 'empty.vhd')
  await parentVhd.readHeaderAndFooter()
  console.log('parent vhd', parentVhd.footer.currentSize)

  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-Oraw',
    'empty.vhd',
    'recovered',
  ])
  await execa('truncate', ['-s', originalSize, 'recovered'])
  console.log('ls', (await execa('ls', ['-lt'])).stdout)
  console.log(
    'diff',
    (await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
  )

  /* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  await vhd.ensureBatSize(300)

  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  */
  console.log(await handler.list())
  console.log('lol')
}

export { testVhdMerge as default }
@@ -1026,8 +1026,6 @@ export default class Xapi extends XapiBase {
          sr: mapVdisSrs[vdi.uuid] || srId,
        })
        $defer.onFailure(() => this._deleteVdi(newVdi))

        return newVdi
      }

      await asyncMap(vbds[vdiId], vbd =>
@@ -1316,7 +1314,7 @@ export default class Xapi extends XapiBase {
  async _importOvaVm (
    $defer,
    stream,
    { descriptionLabel, disks, memory, nameLabel, networks, nCpus },
    { descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables },
    sr
  ) {
    // 1. Create VM.
@@ -1389,8 +1387,9 @@ export default class Xapi extends XapiBase {
        return
      }

      const vhdStream = await vmdkToVhd(stream)
      await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_RAW)
      const table = tables[entry.name]
      const vhdStream = await vmdkToVhd(stream, table)
      await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)

      // See: https://github.com/mafintosh/tar-stream#extracting
      // No import parallelization.
@@ -37,6 +37,8 @@ declare class XapiObject {
type Id = string | XapiObject
declare export class Vm extends XapiObject {
  $snapshots: Vm[];
  is_a_snapshot: boolean;
  is_a_template: boolean;
  name_label: string;
  other_config: $Dict<string>;
  snapshot_time: number;
@@ -1,9 +1,9 @@
import deferrable from 'golike-defer'
import every from 'lodash/every'
import find from 'lodash/find'
import filter from 'lodash/filter'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
@@ -445,10 +445,10 @@ export default {
    const installableByUuid =
      host.license_params.sku_type !== 'free'
        ? await this._listMissingPoolPatchesOnHost(host)
        : filter(await this._listMissingPoolPatchesOnHost(host), {
            paid: false,
            upgrade: false,
          })
        : pickBy(await this._listMissingPoolPatchesOnHost(host), {
            paid: false,
            upgrade: false,
          })

    // List of all installable patches sorted from the newest to the
    // oldest.
@@ -488,7 +488,7 @@ export default {
      patches =>
        host.license_params.sku_type !== 'free'
          ? patches
          : filter(patches, { paid: false, upgrade: false })
          : pickBy(patches, { paid: false, upgrade: false })
    )
  }
})
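`_listMissingPoolPatchesOnHost()` returns an object keyed by patch UUID, which is why lodash's `pickBy` (object in, object out) replaces `filter` (object in, array out) here; a small illustration with made-up patch data:

import { filter, pickBy } from 'lodash'

const patches = {
  'uuid-1': { paid: false, upgrade: false },
  'uuid-2': { paid: true, upgrade: false },
}

filter(patches, { paid: false, upgrade: false })
// => [ { paid: false, upgrade: false } ] — an array, the UUID keys are lost
pickBy(patches, { paid: false, upgrade: false })
// => { 'uuid-1': { paid: false, upgrade: false } } — keys preserved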
@@ -1,17 +1,21 @@
// @flow

// $FlowFixMe
import type RemoteHandler from '@xen-orchestra/fs'
import defer from 'golike-defer'
import { type Pattern, createPredicate } from 'value-matcher'
import { type Readable, PassThrough } from 'stream'
import { basename, dirname } from 'path'
import { isEmpty, last, mapValues, noop, values } from 'lodash'
import { timeout as pTimeout } from 'promise-toolbox'
import Vhd, {
  chainVhd,
  createSyntheticStream as createVhdReadStream,
} from 'vhd-lib'

import { type CallJob, type Executor, type Job } from '../jobs'
import { type Schedule } from '../scheduling'

import type RemoteHandler from '../../remote-handlers/abstract'
import createSizeStream from '../../size-stream'
import {
  type DeltaVmExport,
@@ -25,11 +29,6 @@ import {
  safeDateFormat,
  serializeError,
} from '../../utils'
import {
  chainVhd,
  createReadStream as createVhdReadStream,
  readVhdMetadata,
} from '../../vhd-merge'

import { translateLegacyJob } from './migration'

@@ -145,6 +144,8 @@ const listReplicatedVms = (
    const oc = object.other_config
    if (
      object.$type === 'vm' &&
      !object.is_a_snapshot &&
      !object.is_a_template &&
      'start' in object.blocked_operations &&
      oc['xo:backup:schedule'] === scheduleId &&
      oc['xo:backup:sr'] === srId &&
@@ -539,6 +540,19 @@ export default class BackupNg {
        // inject an id usable by importVmBackupNg()
        backups.forEach(backup => {
          backup.id = `${remoteId}/${backup._filename}`

          const { vdis, vhds } = backup
          backup.disks =
            vhds === undefined
              ? []
              : Object.keys(vhds).map(vdiId => {
                  const vdi = vdis[vdiId]
                  return {
                    id: `${dirname(backup._filename)}/${vhds[vdiId]}`,
                    name: vdi.name_label,
                    uuid: vdi.uuid,
                  }
                })
        })

        backupsByVm[vmUuid] = backups
@@ -610,6 +624,16 @@ export default class BackupNg {
    const xapi = app.getXapi(vmUuid)
    const vm: Vm = (xapi.getObject(vmUuid): any)

    // ensure the VM itself does not have any backup metadata which would be
    // copied on manual snapshots and interfere with the backup jobs
    if ('xo:backup:job' in vm.other_config) {
      await xapi._updateObjectMapProperty(vm, 'other_config', {
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }

    const { id: jobId, settings } = job
    const { id: scheduleId } = schedule

@@ -1042,9 +1066,13 @@ export default class BackupNg {
    const vhds = await asyncMap(
      await handler.list(dirname(path), { filter: isVhd, prependDir: true }),
      async path => {
        const metadata = await readVhdMetadata(handler, path)
        metadata.path = path
        return metadata
        const vhd = new Vhd(handler, path)
        await vhd.readHeaderAndFooter()
        return {
          footer: vhd.footer,
          header: vhd.header,
          path,
        }
      }
    )
    const base = basename(path)
@@ -1096,7 +1124,11 @@ export default class BackupNg {
        })
      )
    } catch (error) {
      if (error == null || error.code !== 'ENOENT') {
      let code
      if (
        error == null ||
        ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')
      ) {
        throw error
      }
    }
@@ -12,7 +12,6 @@ import {
  endsWith,
  filter,
  find,
  findIndex,
  includes,
  once,
  range,
@@ -20,9 +19,13 @@ import {
  startsWith,
  trim,
} from 'lodash'
import {
  chainVhd,
  createSyntheticStream as createVhdReadStream,
  mergeVhd,
} from 'vhd-lib'

import createSizeStream from '../size-stream'
import vhdMerge, { chainVhd } from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import { lvs, pvs } from '../lvm'
import {
@@ -551,7 +554,7 @@ export default class {
      const backup = `${dir}/${backups[j]}`

      try {
        mergedDataSize += await vhdMerge(handler, parent, handler, backup)
        mergedDataSize += await mergeVhd(handler, parent, handler, backup)
      } catch (e) {
        console.error('Unable to use vhd-util.', e)
        throw e
@@ -566,33 +569,6 @@ export default class {
    return mergedDataSize
  }

  async _listDeltaVdiDependencies (handler, filePath) {
    const dir = dirname(filePath)
    const filename = basename(filePath)
    const backups = await this._listVdiBackups(handler, dir)

    // Search file. (delta or full backup)
    const i = findIndex(
      backups,
      backup => getVdiTimestamp(backup) === getVdiTimestamp(filename)
    )

    if (i === -1) {
      throw new Error('VDI to import not found in this remote.')
    }

    // Search full backup.
    let j

    for (j = i; j >= 0 && isDeltaVdiBackup(backups[j]); j--);

    if (j === -1) {
      throw new Error(`Unable to found full vdi backup of: ${filePath}`)
    }

    return backups.slice(j, i + 1)
  }

  // -----------------------------------------------------------------

  async _listDeltaVmBackups (handler, dir) {
@@ -840,17 +816,17 @@ export default class {

    await Promise.all(
      mapToArray(delta.vdis, async (vdi, id) => {
        const vdisFolder = `${basePath}/${dirname(vdi.xoPath)}`
        const backups = await this._listDeltaVdiDependencies(
          handler,
          `${basePath}/${vdi.xoPath}`
        )
        let path = `${basePath}/${vdi.xoPath}`
        try {
          await handler.getSize(path)
        } catch (error) {
          if (error == null || error.code !== 'ENOENT') {
            throw error
          }

        streams[`${id}.vhd`] = await Promise.all(
          mapToArray(backups, async backup =>
            handler.createReadStream(`${vdisFolder}/${backup}`)
          )
        )
          path = path.replace(/_delta\.vhd$/, '_full.vhd')
        }
        streams[`${id}.vhd`] = await createVhdReadStream(handler, path)
      })
    )

@@ -1038,13 +1014,13 @@ export default class {
        // VHD path may need to be fixed.
        return endsWith(vhdPath, '_delta.vhd')
          ? pFromCallback(cb => stat(vhdPath, cb)).then(
            () => vhdPath,
            error => {
              if (error && error.code === 'ENOENT') {
                return `${vhdPath.slice(0, -10)}_full.vhd`
              () => vhdPath,
              error => {
                if (error && error.code === 'ENOENT') {
                  return `${vhdPath.slice(0, -10)}_full.vhd`
                }
              }
            }
          )
          )
          : vhdPath
      })
      .then(vhdPath => execa('vhdimount', [vhdPath, mountDir]))
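The per-delta stream lists above are replaced by vhd-lib's synthetic stream, which resolves a VHD and its whole parent chain into a single standalone VHD on the fly; a minimal sketch, assuming a remote handler and a (possibly delta) VHD path as used in this mixin:

import { createSyntheticStream } from 'vhd-lib'

// Reads `path` and its parents through `handler` and exposes the merged
// result as one readable stream — suitable for piping to an HTTP response
// or a file, without materializing the full disk on the remote.
async function exportFullVhd (handler, path) {
  return createSyntheticStream(handler, path)
}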
332
packages/xo-server/src/xo-mixins/file-restore-ng.js
Normal file
@@ -0,0 +1,332 @@
import defer from 'golike-defer'
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser as createPairsParser } from 'parse-pairs'
import { normalize } from 'path'
import { readdir, rmdir, stat } from 'fs-extra'
import { ZipFile } from 'yazl'

import { lvs, pvs } from '../lvm'
import { resolveSubpath, tmpDir } from '../utils'

const IGNORED_PARTITION_TYPES = {
  // https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
  0x05: true,
  0x0f: true,
  0x15: true,
  0x5e: true,
  0x5f: true,
  0x85: true,
  0x91: true,
  0x9b: true,
  0xc5: true,
  0xcf: true,
  0xd5: true,

  0x82: true, // swap
}

const PARTITION_TYPE_NAMES = {
  0x07: 'NTFS',
  0x0c: 'FAT',
  0x83: 'linux',
}

const RE_VHDI = /^vhdi(\d+)$/

const parsePartxLine = createPairsParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
  valueTransform: (value, key) =>
    key === 'start' || key === 'size'
      ? +value
      : key === 'type' ? PARTITION_TYPE_NAMES[+value] || value : value,
})

const listLvmLogicalVolumes = defer(
  async ($defer, devicePath, partition, results = []) => {
    const pv = await mountLvmPhysicalVolume(devicePath, partition)
    $defer(pv.unmount)

    const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], pv.path)
    const partitionId = partition !== undefined ? partition.id : ''
    lvs.forEach((lv, i) => {
      const name = lv.lv_name
      if (name !== '') {
        results.push({
          id: `${partitionId}/${lv.vg_name}/${name}`,
          name,
          size: lv.lv_size,
        })
      }
    })
    return results
  }
)

async function mountLvmPhysicalVolume (devicePath, partition) {
  const args = []
  if (partition !== undefined) {
    args.push('-o', partition.start * 512)
  }
  args.push('--show', '-f', devicePath)
  const path = (await execa.stdout('losetup', args)).trim()
  await execa('pvscan', ['--cache', path])

  return {
    path,
    unmount: async () => {
      try {
        const vgNames = await pvs('vg_name', path)
        await execa('vgchange', ['-an', ...vgNames])
      } finally {
        await execa('losetup', ['-d', path])
      }
    },
  }
}

const mountPartition = defer(async ($defer, devicePath, partition) => {
  const options = ['loop', 'ro']

  if (partition !== undefined) {
    const { start } = partition
    if (start !== undefined) {
      options.push(`offset=${start * 512}`)
    }
  }

  const path = await tmpDir()
  $defer.onFailure(rmdir, path)

  const mount = options =>
    execa('mount', [
      `--options=${options.join(',')}`,
      `--source=${devicePath}`,
      `--target=${path}`,
    ])

  // `norecovery` option is used for ext3/ext4/xfs, if it fails it might be
  // another fs, try without
  try {
    await mount([...options, 'norecovery'])
  } catch (error) {
    await mount(options)
  }
  const unmount = async () => {
    await execa('umount', ['--lazy', path])
    return rmdir(path)
  }
  $defer.onFailure(unmount)

  return { path, unmount }
})

// - [x] list partitions
// - [x] list files in a partition
// - [x] list files in a bare partition
// - [x] list LVM partitions
//
// - [ ] partitions with unmount debounce
// - [ ] handle directory restore
// - [ ] handle multiple entries restore (both dirs and files)
// - [ ] by default use common path as root
// - [ ] handle LVM partitions on multiple disks
// - [ ] find mounted disks/partitions on start (in case of interruptions)
//
// - [ ] manual mount/unmount (of disk) for advance file restore
//   - could it stay mounted during the backup process?
// - [ ] mountDisk (VHD)
// - [ ] unmountDisk (only for manual mount)
// - [ ] getMountedDisks
// - [ ] mountPartition (optional)
// - [ ] getMountedPartitions
// - [ ] unmountPartition
export default class BackupNgFileRestore {
  constructor (app) {
    this._app = app
    this._mounts = { __proto__: null }
  }

  @defer
  async fetchBackupNgPartitionFiles (
    $defer,
    remoteId,
    diskId,
    partitionId,
    paths
  ) {
    const disk = await this._mountDisk(remoteId, diskId)
    $defer.onFailure(disk.unmount)

    const partition = await this._mountPartition(disk.path, partitionId)
    $defer.onFailure(partition.unmount)

    const zip = new ZipFile()
    paths.forEach(file => {
      zip.addFile(resolveSubpath(partition.path, file), normalize('./' + file))
    })
    zip.end()
    return zip.outputStream.on('end', () =>
      partition.unmount().then(disk.unmount)
    )
  }

  @defer
  async listBackupNgDiskPartitions ($defer, remoteId, diskId) {
    const disk = await this._mountDisk(remoteId, diskId)
    $defer(disk.unmount)
    return this._listPartitions(disk.path)
  }

  @defer
  async listBackupNgPartitionFiles (
    $defer,
    remoteId,
    diskId,
    partitionId,
    path
  ) {
    const disk = await this._mountDisk(remoteId, diskId)
    $defer(disk.unmount)

    const partition = await this._mountPartition(disk.path, partitionId)
    $defer(partition.unmount)

    path = resolveSubpath(partition.path, path)

    const entriesMap = {}
    await Promise.all(
      (await readdir(path)).map(async name => {
        try {
          const stats = await stat(`${path}/${name}`)
          entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
        } catch (error) {
          if (error == null || error.code !== 'ENOENT') {
            throw error
          }
        }
      })
    )
    return entriesMap
  }

  async _findPartition (devicePath, partitionId) {
    const partitions = await this._listPartitions(devicePath, false)
    const partition = partitions.find(_ => _.id === partitionId)
    if (partition === undefined) {
      throw new Error(`partition ${partitionId} not found`)
    }
    return partition
  }

  async _listPartitions (devicePath, inspectLvmPv = true) {
    const stdout = await execa.stdout('partx', [
      '--bytes',
      '--output=NR,START,SIZE,NAME,UUID,TYPE',
      '--pairs',
      devicePath,
    ])

    const promises = []
    const partitions = []
    splitLines(stdout).forEach(line => {
      const partition = parsePartxLine(line)
      let { type } = partition
      if (type == null || (type = +type) in IGNORED_PARTITION_TYPES) {
        return
      }

      if (inspectLvmPv && type === 0x8e) {
        promises.push(listLvmLogicalVolumes(devicePath, partition, partitions))
        return
      }

      partitions.push(partition)
    })

    await Promise.all(promises)

    return partitions
  }

  @defer
  async _mountDisk ($defer, remoteId, diskId) {
    const handler = await this._app.getRemoteHandler(remoteId)
    if (handler._getFilePath === undefined) {
      throw new Error(`this remote is not supported`)
    }

    const diskPath = handler._getFilePath(diskId)
    const mountDir = await tmpDir()
    $defer.onFailure(rmdir, mountDir)

    await execa('vhdimount', [diskPath, mountDir])
    const unmount = async () => {
      await execa('fusermount', ['-uz', mountDir])
      return rmdir(mountDir)
    }
    $defer.onFailure(unmount)

    let max = 0
    let maxEntry
    const entries = await readdir(mountDir)
    entries.forEach(entry => {
      const matches = RE_VHDI.exec(entry)
      if (matches !== null) {
        const value = +matches[1]
        if (value > max) {
          max = value
          maxEntry = entry
        }
      }
    })
    if (max === 0) {
      throw new Error('no disks found')
    }

    return {
      path: `${mountDir}/${maxEntry}`,
      unmount,
    }
  }

  @defer
  async _mountPartition ($defer, devicePath, partitionId) {
    if (partitionId === undefined) {
      return mountPartition(devicePath)
    }

    if (partitionId.includes('/')) {
      const [pvId, vgName, lvName] = partitionId.split('/')
      const lvmPartition =
        pvId !== '' ? await this._findPartition(devicePath, pvId) : undefined

      const pv = await mountLvmPhysicalVolume(devicePath, lvmPartition)

      const unmountQueue = [pv.unmount]
      const unmount = async () => {
        let fn
        while ((fn = unmountQueue.pop()) !== undefined) {
          await fn()
        }
      }
      $defer.onFailure(unmount)

      await execa('vgchange', ['-ay', vgName])
      unmountQueue.push(() => execa('vgchange', ['-an', vgName]))

      const partition = await mountPartition(
        (await lvs(['lv_name', 'lv_path'], vgName)).find(
          _ => _.lv_name === lvName
        ).lv_path
      )
      unmountQueue.push(partition.unmount)
      return { ...partition, unmount }
    }

    return mountPartition(
      devicePath,
      await this._findPartition(devicePath, partitionId)
    )
  }
}
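Partition offsets in the mixin above are expressed in 512-byte sectors, as reported by partx; a worked example of the mount option it builds (the start sector is made up):

// partx reports START in sectors; mount(8) expects a byte offset.
const SECTOR_SIZE = 512
const partition = { start: 2048 } // typical first partition on an MBR disk
const offset = partition.start * SECTOR_SIZE // 1048576 bytes = 1 MiB
// => mount --options=loop,ro,offset=1048576 --source=<disk> --target=<tmpdir>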
@@ -225,9 +225,10 @@ export default class Jobs {

    runningJobs[id] = runJobId

    let session
    try {
      const app = this._app
      const session = app.createUserConnection()
      session = app.createUserConnection()
      session.set('user_id', job.userId)

      const status = await executor({
@@ -255,6 +256,9 @@ export default class Jobs {
      throw error
    } finally {
      delete runningJobs[id]
      if (session !== undefined) {
        session.close()
      }
    }
  }
@@ -1,7 +1,7 @@
import { getHandler } from '@xen-orchestra/fs'
import { noSuchObject } from 'xo-common/api-errors'

import { forEach, mapToArray } from '../utils'
import { getHandler } from '../remote-handlers'
import { Remotes } from '../models/remote'

// ===================================================================
@@ -1,7 +1,7 @@
// @flow

import mergeVhd_ from '../../vhd-merge'
import { type Remote, getHandler } from '../../remote-handlers'
import { type Remote, getHandler } from '@xen-orchestra/fs'
import { mergeVhd as mergeVhd_ } from 'vhd-lib'

export function mergeVhd (
  parentRemote: Remote,
@@ -140,7 +140,11 @@ export default class Xo extends EventEmitter {
    }).then(
      result => {
        if (result != null) {
          res.end(JSON.stringify(result))
          if (typeof result.pipe === 'function') {
            result.pipe(res)
          } else {
            res.end(JSON.stringify(result))
          }
        }
      },
      error => {
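Consolidated, the new dispatch logic duck-types streams by the presence of a pipe() function; a self-contained sketch of the same pattern:

// Anything with a pipe() function is treated as a stream and piped to the
// HTTP response; every other non-null result is serialized as JSON.
function sendResult (res, result) {
  if (result != null) {
    if (typeof result.pipe === 'function') {
      result.pipe(res)
    } else {
      res.end(JSON.stringify(result))
    }
  }
}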
3
packages/xo-vmdk-to-vhd/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -23,44 +23,34 @@
    "node": ">=4"
  },
  "dependencies": {
    "babel-runtime": "^6.18.0",
    "@babel/runtime": "^7.0.0-beta.44",
    "child-process-promise": "^2.0.3",
    "deflate-js": "^0.2.3",
    "fs-promise": "^2.0.0",
    "pipette": "^0.9.3"
    "pipette": "^0.9.3",
    "promise-toolbox": "^0.9.5",
    "tmp": "^0.0.33",
    "vhd-lib": "^0.0.0"
  },
  "devDependencies": {
    "babel-cli": "^6.18.0",
    "babel-plugin-transform-runtime": "^6.15.0",
    "babel-preset-env": "^1.0.0",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "mocha": "^4.0.1",
    "must": "^0.13.2",
    "rimraf": "^2.5.4"
    "event-to-promise": "^0.8.0",
    "execa": "^0.10.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "dev-test": "mocha --watch --reporter=min \"dist/**/*.spec.js\"",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "test-disabled": "mocha \"dist/**/*.spec.js\""
  },
  "babel": {
    "plugins": [
      "transform-runtime"
    ],
    "presets": [
      [
        "env",
        {
          "targets": {
            "node": 4
          }
        }
      ]
    ]
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}
@@ -1 +1,16 @@
export { convertFromVMDK as default } from './vhd-write'
import { createReadableSparseStream } from 'vhd-lib'

import { VMDKDirectParser, readVmdkGrainTable } from './vmdk-read'

async function convertFromVMDK (vmdkReadStream, table) {
  const parser = new VMDKDirectParser(vmdkReadStream)
  const header = await parser.readHeader()
  return createReadableSparseStream(
    header.capacitySectors * 512,
    header.grainSizeSectors * 512,
    table,
    parser.blockIterator()
  )
}

export { convertFromVMDK as default, readVmdkGrainTable }
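A usage sketch of the new two-pass API: the grain table is read first (it needs random access to the file), then the VMDK is streamed through the converter. File names are examples; the accessor contract — (start, end) => Promise<ArrayBuffer> — matches createFileAccessor() in the integration test later in this diff:

import { createReadStream, createWriteStream } from 'fs'
import convertFromVMDK, { readVmdkGrainTable } from 'xo-vmdk-to-vhd'

async function vmdkToVhdFile (accessor, vmdkPath, vhdPath) {
  const table = await readVmdkGrainTable(accessor)
  const vhdStream = await convertFromVMDK(createReadStream(vmdkPath), table)
  vhdStream.pipe(createWriteStream(vhdPath))
}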
@@ -1,340 +0,0 @@
'use strict'
import { open, write } from 'fs-promise'
import stream from 'stream'
import { VMDKDirectParser } from './vmdk-read'

const footerCookie = 'conectix'
const creatorApp = 'xo '
// it looks like everybody is using Wi2k
const osString = 'Wi2k'
const headerCookie = 'cxsparse'
const fixedHardDiskType = 2
const dynamicHardDiskType = 3

const sectorSize = 512

export function computeChecksum (buffer) {
  let sum = 0
  for (let i = 0; i < buffer.length; i++) {
    sum += buffer[i]
  }
  // http://stackoverflow.com/a/1908655/72637 the >>> prevents the number from going negative
  return ~sum >>> 0
}

class Block {
  constructor (blockSize) {
    const bitmapSize = blockSize / sectorSize / 8
    const bufferSize =
      Math.ceil((blockSize + bitmapSize) / sectorSize) * sectorSize
    this.buffer = Buffer.alloc(bufferSize)
    this.bitmapBuffer = this.buffer.slice(0, bitmapSize)
    this.dataBuffer = this.buffer.slice(bitmapSize)
    this.bitmapBuffer.fill(0xff)
  }

  writeData (buffer, offset = 0) {
    buffer.copy(this.dataBuffer, offset)
  }

  async writeOnFile (file) {
    await write(file, this.buffer, 0, this.buffer.length)
  }
}

class SparseExtent {
  constructor (dataSize, blockSize, startOffset) {
    this.table = createEmptyTable(dataSize, blockSize)
    this.blockSize = blockSize
    this.startOffset = (startOffset + this.table.buffer.length) / sectorSize
  }

  get entryCount () {
    return this.table.entryCount
  }

  _writeBlock (blockBuffer, tableIndex, offset) {
    if (blockBuffer.length + offset > this.blockSize) {
      throw new Error('invalid block geometry')
    }
    let entry = this.table.entries[tableIndex]
    if (entry === undefined) {
      entry = new Block(this.blockSize)
      this.table.entries[tableIndex] = entry
    }
    entry.writeData(blockBuffer, offset)
  }

  writeBuffer (buffer, offset = 0) {
    const startBlock = Math.floor(offset / this.blockSize)
    const endBlock = Math.ceil((offset + buffer.length) / this.blockSize)
    for (let i = startBlock; i < endBlock; i++) {
      const blockDelta = offset - i * this.blockSize
      let blockBuffer, blockOffset
      if (blockDelta > 0) {
        blockBuffer = buffer.slice(0, (i + 1) * this.blockSize - offset)
        blockOffset = blockDelta
      } else {
        blockBuffer = buffer.slice(
          -blockDelta,
          (i + 1) * this.blockSize - offset
        )
        blockOffset = 0
      }
      this._writeBlock(blockBuffer, i, blockOffset)
    }
  }

  async writeOnFile (file) {
    let currentOffset = this.startOffset
    for (let i = 0; i < this.table.entryCount; i++) {
      const block = this.table.entries[i]
      if (block !== undefined) {
        this.table.buffer.writeUInt32BE(currentOffset, i * 4)
        currentOffset += block.buffer.length / sectorSize
      }
    }
    await write(file, this.table.buffer, 0, this.table.buffer.length)
    for (let i = 0; i < this.table.entryCount; i++) {
      const block = this.table.entries[i]
      if (block !== undefined) {
        await block.writeOnFile(file)
      }
    }
  }
}

export class VHDFile {
  constructor (virtualSize, timestamp) {
    this.geomtry = computeGeometryForSize(virtualSize)
    this.timestamp = timestamp
    this.blockSize = 0x00200000
    this.sparseFile = new SparseExtent(
      this.geomtry.actualSize,
      this.blockSize,
      sectorSize * 3
    )
  }

  writeBuffer (buffer, offset = 0) {
    this.sparseFile.writeBuffer(buffer, offset)
  }

  async writeFile (fileName) {
    const fileFooter = createFooter(
      this.geomtry.actualSize,
      this.timestamp,
      this.geomtry,
      dynamicHardDiskType,
      512,
      0
    )
    const diskHeader = createDynamicDiskHeader(
      this.sparseFile.entryCount,
      this.blockSize
    )
    const file = await open(fileName, 'w')
    await write(file, fileFooter, 0, fileFooter.length)
    await write(file, diskHeader, 0, diskHeader.length)
    await this.sparseFile.writeOnFile(file)
    await write(file, fileFooter, 0, fileFooter.length)
  }
}

export function computeGeometryForSize (size) {
  const totalSectors = Math.ceil(size / 512)
  let sectorsPerTrack
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrack = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrack
  } else {
    sectorsPerTrack = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrack
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrack = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrack
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrack = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrack
    }
  }
  const cylinders = Math.floor(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrack * sectorSize
  return { cylinders, heads, sectorsPerTrack, actualSize }
}

export function createFooter (
  size,
  timestamp,
  geometry,
  diskType,
  dataOffsetLow = 0xffffffff,
  dataOffsetHigh = 0xffffffff
) {
  const footer = Buffer.alloc(512)
  Buffer.from(footerCookie, 'ascii').copy(footer)
  footer.writeUInt32BE(2, 8)
  footer.writeUInt32BE(0x00010000, 12)
  footer.writeUInt32BE(dataOffsetHigh, 16)
  footer.writeUInt32BE(dataOffsetLow, 20)
  footer.writeUInt32BE(timestamp, 24)
  Buffer.from(creatorApp, 'ascii').copy(footer, 28)
  Buffer.from(osString, 'ascii').copy(footer, 36)
  // do not use & 0xFFFFFFFF to extract lower bits, that would propagate a negative sign if the 2^31 bit is one
  const sizeHigh = Math.floor(size / Math.pow(2, 32)) % Math.pow(2, 32)
  const sizeLow = size % Math.pow(2, 32)
  footer.writeUInt32BE(sizeHigh, 40)
  footer.writeUInt32BE(sizeLow, 44)
  footer.writeUInt32BE(sizeHigh, 48)
  footer.writeUInt32BE(sizeLow, 52)
  footer.writeUInt16BE(geometry['cylinders'], 56)
  footer.writeUInt8(geometry['heads'], 58)
  footer.writeUInt8(geometry['sectorsPerTrack'], 59)
  footer.writeUInt32BE(diskType, 60)
  const checksum = computeChecksum(footer)
  footer.writeUInt32BE(checksum, 64)
  return footer
}

export function createDynamicDiskHeader (tableEntries, blockSize) {
  const header = Buffer.alloc(1024)
  Buffer.from(headerCookie, 'ascii').copy(header)
  // hard code no next data
  header.writeUInt32BE(0xffffffff, 8)
  header.writeUInt32BE(0xffffffff, 12)
  // hard code table offset
  header.writeUInt32BE(0, 16)
  header.writeUInt32BE(sectorSize * 3, 20)
  header.writeUInt32BE(0x00010000, 24)
  header.writeUInt32BE(tableEntries, 28)
  header.writeUInt32BE(blockSize, 32)
  const checksum = computeChecksum(header)
  header.writeUInt32BE(checksum, 36)
  return header
}

export function createEmptyTable (dataSize, blockSize) {
  const blockCount = Math.ceil(dataSize / blockSize)
  const tableSizeSectors = Math.ceil(blockCount * 4 / sectorSize)
  const buffer = Buffer.alloc(tableSizeSectors * sectorSize, 0xff)
  return { entryCount: blockCount, buffer: buffer, entries: [] }
}

export class ReadableRawVHDStream extends stream.Readable {
  constructor (size, vmdkParser) {
    super()
    this.size = size
    const geometry = computeGeometryForSize(size)
    this.footer = createFooter(
      size,
      Math.floor(Date.now() / 1000),
      geometry,
      fixedHardDiskType
    )
    this.position = 0
    this.vmdkParser = vmdkParser
    this.done = false
    this.busy = false
    this.currentFile = []
  }

  filePadding (paddingLength) {
    if (paddingLength !== 0) {
      const chunkSize = 1024 * 1024 // 1Mo
      const chunkCount = Math.floor(paddingLength / chunkSize)
      for (let i = 0; i < chunkCount; i++) {
        this.currentFile.push(() => {
          const paddingBuffer = Buffer.alloc(chunkSize)
          return paddingBuffer
        })
      }
      this.currentFile.push(() => {
        const paddingBuffer = Buffer.alloc(paddingLength % chunkSize)
        return paddingBuffer
      })
    }
  }

  async pushNextBlock () {
    const next = await this.vmdkParser.next()
    if (next === null) {
      const paddingLength = this.size - this.position
      this.filePadding(paddingLength)
      this.currentFile.push(() => this.footer)
      this.currentFile.push(() => {
        this.done = true
        return null
      })
    } else {
      const offset = next.lbaBytes
      const buffer = next.grain
      const paddingLength = offset - this.position
      if (paddingLength < 0) {
        process.nextTick(() =>
          this.emit(
            'error',
            'This VMDK file does not have its blocks in the correct order'
          )
        )
      }
      this.filePadding(paddingLength)
      this.currentFile.push(() => buffer)
      this.position = offset + buffer.length
    }
    return this.pushFileUntilFull()
  }

  // returns true if the file is empty
  pushFileUntilFull () {
    while (true) {
      if (this.currentFile.length === 0) {
        break
      }
      const result = this.push(this.currentFile.shift()())
      if (!result) {
        break
      }
    }
    return this.currentFile.length === 0
  }

  async pushNextUntilFull () {
    while (!this.done && (await this.pushNextBlock())) {}
  }

  _read () {
    if (this.busy || this.done) {
      return
    }
    if (this.pushFileUntilFull()) {
      this.busy = true
      this.pushNextUntilFull()
        .then(() => {
          this.busy = false
        })
        .catch(error => {
          process.nextTick(() => this.emit('error', error))
        })
    }
  }
}

export async function convertFromVMDK (vmdkReadStream) {
  const parser = new VMDKDirectParser(vmdkReadStream)
  const header = await parser.readHeader()
  return new ReadableRawVHDStream(header.capacitySectors * sectorSize, parser)
}
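The checksum used by the removed writer is the VHD spec's one's-complement byte sum; a worked example with a made-up 3-byte buffer:

// JS bitwise operators work on signed 32-bit integers; `>>> 0` converts
// the complement back to an unsigned value, as the VHD footer expects.
function computeChecksum (buffer) {
  let sum = 0
  for (let i = 0; i < buffer.length; i++) sum += buffer[i]
  return ~sum >>> 0
}

computeChecksum(Buffer.from([1, 2, 3])) // sum = 6, ~6 >>> 0 === 4294967289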
@@ -1,137 +0,0 @@
'use strict'

import expect from 'must'
import { createWriteStream } from 'fs'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { readFile } from 'fs-promise'

import {
  computeChecksum,
  computeGeometryForSize,
  createDynamicDiskHeader,
  createFooter,
  ReadableRawVHDStream,
  VHDFile,
} from './vhd-write'

describe('VHD writing', () => {
  it('computeChecksum() is correct against some reference values', () => {
    // those values were taken from a file generated by qemu
    const testValue1 =
      '636F6E6563746978000000020001000000000000000002001F34DB9F71656D75000500035769326B0000000000019800000000000001980000030411000000030000000033B3A5E17F94433498376740246E5660'
    const expectedChecksum1 = 0xffffefb2
    const testValue2 =
      '6378737061727365FFFFFFFFFFFFFFFF0000000000000600000100000000000100200000'
    const expectedChecksum2 = 0xfffff476
    expect(computeChecksum(Buffer.from(testValue1, 'hex'))).to.equal(
      expectedChecksum1
    )
    expect(computeChecksum(Buffer.from(testValue2, 'hex'))).to.equal(
      expectedChecksum2
    )
  })

  it('createFooter() does not crash', () => {
    createFooter(104448, Math.floor(Date.now() / 1000), {
      cylinders: 3,
      heads: 4,
      sectorsPerTrack: 17,
    })
  })

  it('createDynamicDiskHeader() does not crash', () => {
    createDynamicDiskHeader(1, 0x00200000)
  })

  it('ReadableRawVHDStream does not crash', () => {
    const data = [
      {
        lbaBytes: 100,
        grain: Buffer.from('azerzaerazeraze', 'ascii'),
      },
      {
        lbaBytes: 700,
        grain: Buffer.from('gdfslkdfguer', 'ascii'),
      },
    ]
    let index = 0
    const mockParser = {
      next: () => {
        if (index < data.length) {
          const result = data[index]
          index++
          return result
        } else {
          return null
        }
      },
    }
    const stream = new ReadableRawVHDStream(100000, mockParser)
    const pipe = stream.pipe(createWriteStream('outputStream'))
    return new Promise((resolve, reject) => {
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
  })

  it('ReadableRawVHDStream detects when blocks are out of order', () => {
    const data = [
      {
        lbaBytes: 700,
        grain: Buffer.from('azerzaerazeraze', 'ascii'),
      },
      {
        lbaBytes: 100,
        grain: Buffer.from('gdfslkdfguer', 'ascii'),
      },
    ]
    let index = 0
    const mockParser = {
      next: () => {
        if (index < data.length) {
          const result = data[index]
          index++
          return result
        } else {
          return null
        }
      },
    }
    return expect(
      new Promise((resolve, reject) => {
        const stream = new ReadableRawVHDStream(100000, mockParser)
        stream.on('error', reject)
        const pipe = stream.pipe(createWriteStream('outputStream'))
        pipe.on('finish', resolve)
        pipe.on('error', reject)
      })
    ).to.reject.to.equal(
      'This VMDK file does not have its blocks in the correct order'
    )
  })

  it('writing a known file with VHDFile is successful', async () => {
    const fileName = 'output.vhd'
    const rawFilename = 'output.raw'
    const randomFileName = 'random.raw'
    const geometry = computeGeometryForSize(1024 * 1024 * 8)
    const dataSize = geometry.actualSize
    await exec(
      'base64 /dev/urandom | head -c ' + dataSize + ' > ' + randomFileName
    )
    const buffer = await readFile(randomFileName)
    const f = new VHDFile(buffer.length, 523557791)
    const splitPoint = Math.floor(Math.random() * buffer.length)
    f.writeBuffer(buffer.slice(splitPoint), splitPoint)
    f.writeBuffer(buffer.slice(0, splitPoint), 0)
    f.writeBuffer(buffer.slice(splitPoint), splitPoint)
    await f.writeFile(fileName)
    await exec('qemu-img convert -fvpc -Oraw ' + fileName + ' ' + rawFilename)
    const fileContent = await readFile(rawFilename)
    expect(fileContent.length).to.equal(dataSize)
    for (let i = 0; i < fileContent.length; i++) {
      expect(fileContent[i]).to.equal(buffer[i])
    }
  })
})
35
packages/xo-vmdk-to-vhd/src/virtual-buffer.integ.spec.js
Normal file
@@ -0,0 +1,35 @@
/* eslint-env jest */

import { createReadStream, readFile } from 'fs-promise'
import { exec } from 'child-process-promise'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import rimraf from 'rimraf'
import tmp from 'tmp'

import { VirtualBuffer } from './virtual-buffer'

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('Virtual Buffer can read a file correctly', async () => {
  const rawFileName = 'random-data'
  await exec('base64 /dev/urandom | head -c 1048 > ' + rawFileName)
  const buffer = new VirtualBuffer(createReadStream(rawFileName))
  const part1 = await buffer.readChunk(10)
  const part2 = await buffer.readChunk(1038)
  const original = await readFile(rawFileName)
  expect(buffer.isDepleted).toBeTruthy()
  expect(Buffer.concat([part1, part2]).toString('ascii')).toEqual(
    original.toString('ascii')
  )
})
@@ -2,8 +2,6 @@

import { Slicer } from 'pipette'

const chunkSize = 1024 * 1024

export class VirtualBuffer {
  constructor (readStream) {
    this.slicer = new Slicer(readStream)
@@ -15,43 +13,23 @@ export class VirtualBuffer {
    return !this.slicer.readable
  }

  // length = -1 means 'until the end'
  async readChunk (length, label) {
    const _this = this
    if (this.promise !== null) {
      throw new Error('pomise already there !!!', this.promise)
    }
    if (length === -1) {
      const chunks = []
      let error = false
      do {
        const res = await new Promise((resolve, reject) => {
          this.slicer.read(chunkSize, (error, length, data, offset) => {
            if (error !== false && error !== true) {
              reject(error)
            } else {
              resolve({ error, data })
            }
          })
        })
        error = res.error
        chunks.push(res.data)
      } while (error === false)
      return Buffer.concat(chunks)
    } else {
      this.promise = label
      return new Promise((resolve, reject) => {
        this.slicer.read(length, (error, actualLength, data, offset) => {
          if (error !== false && error !== true) {
            _this.promise = null
            reject(error)
          } else {
            _this.promise = null
            _this.position += data.length
            resolve(data)
          }
        })
    this.promise = label
    return new Promise((resolve, reject) => {
      this.slicer.read(length, (error, actualLength, data, offset) => {
        if (error !== false && error !== true) {
          _this.promise = null
          reject(error)
        } else {
          _this.promise = null
          _this.position += data.length
          resolve(data)
        }
      })
      }
    })
  }
}
@@ -1,21 +0,0 @@
import expect from 'must'
import { createReadStream, readFile } from 'fs-promise'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'

import { VirtualBuffer } from './virtual-buffer'

describe('Virtual Buffer', function () {
  it('can read a file correctly', async () => {
    const rawFileName = 'random-data'
    await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
    const buffer = new VirtualBuffer(createReadStream(rawFileName))
    const part1 = await buffer.readChunk(10)
    const part2 = await buffer.readChunk(-1)
    const original = await readFile(rawFileName)
    expect(buffer.isDepleted).to.be.true()
    expect(Buffer.concat([part1, part2]).toString('ascii')).to.equal(
      original.toString('ascii')
    )
  })
})
48
packages/xo-vmdk-to-vhd/src/vmdk-read.integ.spec.js
Normal file
@@ -0,0 +1,48 @@
/* eslint-env jest */

import { createReadStream } from 'fs-promise'
import { exec } from 'child-process-promise'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import rimraf from 'rimraf'
import tmp from 'tmp'

import { VMDKDirectParser } from './vmdk-read'

jest.setTimeout(10000)

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('VMDKDirectParser reads OK', async () => {
  const rawFileName = 'random-data'
  const fileName = 'random-data.vmdk'
  await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
  await exec(
    'rm -f ' +
      fileName +
      '&& python /usr/share/pyshared/VMDKstream.py ' +
      rawFileName +
      ' ' +
      fileName
  )
  const parser = new VMDKDirectParser(createReadStream(fileName))
  const header = await parser.readHeader()
  const harvested = []
  for await (const res of parser.blockIterator()) {
    harvested.push(res)
  }
  expect(harvested.length).toEqual(2)
  expect(harvested[0].offsetBytes).toEqual(0)
  expect(harvested[0].data.length).toEqual(header['grainSizeSectors'] * 512)
  expect(harvested[1].offsetBytes).toEqual(header['grainSizeSectors'] * 512)
})
@@ -1,6 +1,7 @@
'use strict'

import zlib from 'zlib'

import { VirtualBuffer } from './virtual-buffer'

const sectorSize = 512
@@ -266,7 +267,7 @@ export class VMDKDirectParser {
    return this.header
  }

  async next () {
  async * blockIterator () {
    while (!this.virtualBuffer.isDepleted) {
      const position = this.virtualBuffer.position
      const sector = await this.virtualBuffer.readChunk(
@@ -293,79 +294,73 @@ export class VMDKDirectParser {
          'grain remainder ' + this.virtualBuffer.position
        )
        const grainBuffer = Buffer.concat([sector, remainderOfGrainBuffer])
        return readGrain(
        const grain = await readGrain(
          0,
          grainBuffer,
          this.header.compressionMethod === compressionDeflate &&
            this.header.flags.compressedGrains
        )
      }
    }
    return new Promise(resolve => resolve(null))
  }
}

export async function readRawContent (readStream) {
  const virtualBuffer = new VirtualBuffer(readStream)
  const headerBuffer = await virtualBuffer.readChunk(512, 'header')
  let header = parseHeader(headerBuffer)

  // I think the multiplications are OK, because the descriptor is always at the beginning of the file
  const descriptorLength = header.descriptorSizeSectors * sectorSize
  const descriptorBuffer = await virtualBuffer.readChunk(
    descriptorLength,
    'descriptor'
  )
  const descriptor = parseDescriptor(descriptorBuffer)

  // TODO: we concat them back for now so that the indices match, we'll have to introduce a bias later
  const remainingBuffer = await virtualBuffer.readChunk(-1, 'remainder')
  const buffer = Buffer.concat([
    headerBuffer,
    descriptorBuffer,
    remainingBuffer,
  ])
  if (header.grainDirectoryOffsetSectors === -1) {
    header = parseHeader(buffer.slice(-1024, -1024 + sectorSize))
  }
  const rawOutputBuffer = Buffer.alloc(header.capacitySectors * sectorSize)
  const l1Size = Math.floor(
    (header.capacitySectors + header.l1EntrySectors - 1) / header.l1EntrySectors
  )
  const l2Size = header.numGTEsPerGT
  const l1 = []
  for (let i = 0; i < l1Size; i++) {
    const l1Entry = buffer.readUInt32LE(
      header.grainDirectoryOffsetSectors * sectorSize + 4 * i
    )
    if (l1Entry !== 0) {
      l1.push(l1Entry)
      const l2 = []
      for (let j = 0; j < l2Size; j++) {
        const l2Entry = buffer.readUInt32LE(l1Entry * sectorSize + 4 * j)
        if (l2Entry !== 0 && l2Entry !== 1) {
          const grain = await readGrain(
            l2Entry,
            buffer,
            header['flags']['compressedGrains']
          )
          grain.grain.copy(rawOutputBuffer, grain.lba * sectorSize)
          l2[j] = grain
        }
        yield { offsetBytes: grain.lbaBytes, data: grain.grain }
      }
    }
  }
  const vmdkType = descriptor['descriptor']['createType']
  if (!vmdkType || vmdkType.toLowerCase() !== 'streamOptimized'.toLowerCase()) {
    throw new Error(
      'unsupported VMDK type "' +
        vmdkType +
        '", only streamOptimized is supported'
  }

export async function readVmdkGrainTable (fileAccessor) {
  let headerBuffer = await fileAccessor(0, 512)
  let grainDirAddr = headerBuffer.slice(56, 56 + 8)
  if (
    new Int8Array(grainDirAddr).reduce((acc, val) => acc && val === -1, true)
  ) {
    headerBuffer = await fileAccessor(-1024, -1024 + 512)
    grainDirAddr = new DataView(headerBuffer.slice(56, 56 + 8)).getUint32(
      0,
      true
    )
  }
  return {
    descriptor: descriptor.descriptor,
    extents: descriptor.extents,
    rawFile: rawOutputBuffer,
  const grainDirPosBytes = grainDirAddr * 512
  const capacity =
    new DataView(headerBuffer.slice(12, 12 + 8)).getUint32(0, true) * 512
  const grainSize =
    new DataView(headerBuffer.slice(20, 20 + 8)).getUint32(0, true) * 512
  const grainCount = Math.ceil(capacity / grainSize)
  const numGTEsPerGT = new DataView(headerBuffer.slice(44, 44 + 8)).getUint32(
    0,
    true
  )
  const grainTablePhysicalSize = numGTEsPerGT * 4
  const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT)
  const grainDirectoryPhysicalSize = grainDirectoryEntries * 4
  const grainDirBuffer = await fileAccessor(
    grainDirPosBytes,
    grainDirPosBytes + grainDirectoryPhysicalSize
  )
  const grainDir = new Uint32Array(grainDirBuffer)
  const cachedGrainTables = []
  for (let i = 0; i < grainDirectoryEntries; i++) {
    const grainTableAddr = grainDir[i] * 512
    if (grainTableAddr !== 0) {
      cachedGrainTables[i] = new Uint32Array(
        await fileAccessor(
          grainTableAddr,
          grainTableAddr + grainTablePhysicalSize
        )
      )
    }
  }
  const extractedGrainTable = []
  for (let i = 0; i < grainCount; i++) {
    const directoryEntry = Math.floor(i / numGTEsPerGT)
    const grainTable = cachedGrainTables[directoryEntry]
    if (grainTable !== undefined) {
      const grainAddr = grainTable[i % numGTEsPerGT]
      if (grainAddr !== 0) {
        extractedGrainTable.push([i, grainAddr])
      }
    }
  }
  extractedGrainTable.sort(
    ([i1, grainAddress1], [i2, grainAddress2]) => grainAddress1 - grainAddress2
  )
  return extractedGrainTable.map(([index, grainAddress]) => index * grainSize)
}
@@ -1,35 +0,0 @@
import expect from 'must'
import { createReadStream } from 'fs-promise'
import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'

import { VMDKDirectParser } from './vmdk-read'

describe('VMDK reading', () => {
  it('VMDKDirectParser reads OK', async () => {
    const rawFileName = 'random-data'
    const fileName = 'random-data.vmdk'
    await exec('base64 /dev/urandom | head -c 104448 > ' + rawFileName)
    await exec(
      'rm -f ' +
        fileName +
        '&& VBoxManage convertfromraw --format VMDK --variant Stream ' +
        rawFileName +
        ' ' +
        fileName
    )
    const parser = new VMDKDirectParser(createReadStream(fileName))
    const header = await parser.readHeader()
    const harvested = []
    while (true) {
      const res = await parser.next()
      if (res === null) {
        break
      }
      harvested.push(res)
    }
    expect(harvested.length).to.equal(2)
    expect(harvested[0].lba).to.equal(0)
    expect(harvested[1].lba).to.equal(header['grainSizeSectors'])
  }).timeout(10000)
})
91
packages/xo-vmdk-to-vhd/src/vmdk-to-vhd.integ.spec.js
Normal file
91
packages/xo-vmdk-to-vhd/src/vmdk-to-vhd.integ.spec.js
Normal file
@@ -0,0 +1,91 @@
/* eslint-env jest */

import execa from 'execa'
import eventToPromise from 'event-to-promise'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'

import { createReadStream, createWriteStream, stat } from 'fs-promise'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import convertFromVMDK, { readVmdkGrainTable } from '.'

const initialDir = process.cwd()
jest.setTimeout(100000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

function createFileAccessor (file) {
  return async (start, end) => {
    if (start < 0 || end < 0) {
      const fileLength = (await stat(file)).size
      start = start < 0 ? fileLength + start : start
      end = end < 0 ? fileLength + end : end
    }
    const result = await getStream.buffer(
      createReadStream(file, { start, end: end - 1 })
    )
    // crazy stuff to get a browser-compatible ArrayBuffer from a node buffer
    // https://stackoverflow.com/a/31394257/72637
    return result.buffer.slice(
      result.byteOffset,
      result.byteOffset + result.byteLength
    )
  }
}

test('VMDK to VHD can convert a random data file with VMDKDirectParser', async () => {
  const inputRawFileName = 'random-data.raw'
  const vmdkFileName = 'random-data.vmdk'
  const vhdFileName = 'from-vmdk-VMDKDirectParser.vhd'
  const reconvertedFromVhd = 'from-vhd.raw'
  const reconvertedFromVmdk = 'from-vhd-by-vbox.raw'
  const dataSize = 8355840 // this number is an integer head/cylinder/count equation solution
  try {
    await execa.shell(
      'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
    )
    await execa.shell(
      'python /usr/share/pyshared/VMDKstream.py ' +
        inputRawFileName +
        ' ' +
        vmdkFileName
    )
    const result = await readVmdkGrainTable(createFileAccessor(vmdkFileName))
    const pipe = (await convertFromVMDK(
      createReadStream(vmdkFileName),
      result
    )).pipe(createWriteStream(vhdFileName))
    await eventToPromise(pipe, 'finish')
    await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdFileName])
    await execa('qemu-img', [
      'convert',
      '-fvmdk',
      '-Oraw',
      vmdkFileName,
      reconvertedFromVmdk,
    ])
    await execa('qemu-img', [
      'convert',
      '-fvpc',
      '-Oraw',
      vhdFileName,
      reconvertedFromVhd,
    ])
    await execa('qemu-img', ['compare', inputRawFileName, vhdFileName])
  } catch (error) {
    console.error(error.stdout)
    console.error(error.stderr)
    console.error(error.message)
    throw error
  }
})
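createFileAccessor above is the adapter handed to readVmdkGrainTable: it turns a file path into an async (start, end) → ArrayBuffer reader, with negative offsets resolved against the file size. A usage sketch, to be run inside an async function ('disk.vmdk' is a placeholder name):

const read = createFileAccessor('disk.vmdk')
// First 512 bytes of the file.
const head = await read(0, 512)
// Negative offsets count from EOF: this is the 512-byte window that ends
// 512 bytes before the end of the file.
const nearEnd = await read(-1024, -512)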
@@ -1,115 +0,0 @@
'use strict'

import { describe, it } from 'mocha'
import { exec } from 'child-process-promise'
import { createReadStream, createWriteStream } from 'fs-promise'

import { readRawContent } from './vmdk-read'
import { VHDFile, convertFromVMDK, computeGeometryForSize } from './vhd-write'

describe('VMDK to VHD conversion', () => {
  it('can convert a random data file with readRawContent()', async () => {
    const inputRawFileName = 'random-data.raw'
    const vmdkFileName = 'random-data.vmdk'
    const vhdFileName = 'from-vmdk-readRawContent.vhd'
    const reconvertedRawFilemane = 'from-vhd.raw'
    const dataSize = 5222400
    await exec(
      'rm -f ' +
        [
          inputRawFileName,
          vmdkFileName,
          vhdFileName,
          reconvertedRawFilemane,
        ].join(' ')
    )
    await exec(
      'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
    )
    await exec(
      'VBoxManage convertfromraw --format VMDK --variant Stream ' +
        inputRawFileName +
        ' ' +
        vmdkFileName
    )
    const rawContent = (await readRawContent(createReadStream(vmdkFileName)))
      .rawFile
    const f = new VHDFile(rawContent.length, 523557791)
    await f.writeBuffer(rawContent)
    await f.writeFile(vhdFileName)
    await exec(
      'qemu-img convert -fvpc -Oraw ' +
        vhdFileName +
        ' ' +
        reconvertedRawFilemane
    )
    return exec('qemu-img compare ' + vmdkFileName + ' ' + vhdFileName).catch(
      error => {
        console.error(error.stdout)
        console.error(error.stderr)
        console.error(vhdFileName, vmdkFileName, error.message)

        throw error
      }
    )
  })

  it('can convert a random data file with VMDKDirectParser', async () => {
    const inputRawFileName = 'random-data.raw'
    const vmdkFileName = 'random-data.vmdk'
    const vhdFileName = 'from-vmdk-VMDKDirectParser.vhd'
    const reconvertedRawFilemane = 'from-vhd.raw'
    const reconvertedByVBoxRawFilemane = 'from-vhd-by-vbox.raw'
    const dataSize = computeGeometryForSize(8 * 1024 * 1024).actualSize
    await exec(
      'rm -f ' +
        [
          inputRawFileName,
          vmdkFileName,
          vhdFileName,
          reconvertedRawFilemane,
          reconvertedByVBoxRawFilemane,
        ].join(' ')
    )
    await exec(
      'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
    )
    await exec(
      'VBoxManage convertfromraw --format VMDK --variant Stream ' +
        inputRawFileName +
        ' ' +
        vmdkFileName
    )
    const pipe = (await convertFromVMDK(createReadStream(vmdkFileName))).pipe(
      createWriteStream(vhdFileName)
    )
    await new Promise((resolve, reject) => {
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
    await exec(
      'qemu-img convert -fvmdk -Oraw ' +
        vmdkFileName +
        ' ' +
        reconvertedByVBoxRawFilemane
    )
    await exec(
      'qemu-img convert -fvpc -Oraw ' +
        vhdFileName +
        ' ' +
        reconvertedRawFilemane
    )
    return exec(
      'qemu-img compare ' +
        reconvertedByVBoxRawFilemane +
        ' ' +
        reconvertedRawFilemane
    ).catch(error => {
      console.error(error.stdout)
      console.error(error.stderr)
      console.error(vhdFileName, vmdkFileName, error.message)

      throw error
    })
  })
})
@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xo-web",
  "version": "5.18.3",
  "version": "5.19.1",
  "license": "AGPL-3.0",
  "description": "Web interface client for Xen-Orchestra",
  "keywords": [
@@ -33,6 +33,7 @@
    "@julien-f/freactal": "0.1.0",
    "@nraynaud/novnc": "0.6.1",
    "@xen-orchestra/cron": "^1.0.3",
    "xo-vmdk-to-vhd": "0.0.12",
    "ansi_up": "^3.0.0",
    "asap": "^2.0.6",
    "babel-core": "^6.26.0",
@@ -125,7 +126,6 @@
    "rimraf": "^2.6.2",
    "semver": "^5.4.1",
    "styled-components": "^3.1.5",
    "tar-stream": "^1.5.5",
    "uglify-es": "^3.3.4",
    "uncontrollable-input": "^0.1.1",
    "url-parse": "^1.2.0",
@@ -540,10 +540,11 @@ const messages = {

  // ----- SR advanced tab -----

  srUnhealthyVdiDepth: 'Depth',
  srUnhealthyVdiNameLabel: 'Name',
  srUnhealthyVdiSize: 'Size',
  srUnhealthyVdiDepth: 'Depth',
  srUnhealthyVdiTitle: 'VDI to coalesce ({total, number})',
  srUnhealthyVdiUuid: 'UUID',

  // ----- SR stats tab -----

@@ -1037,6 +1038,8 @@ const messages = {
  vmNameLabel: 'Name',
  vmNameDescription: 'Description',
  vmContainer: 'Resident on',
  vmSnapshotsRelatedToNonExistentBackups:
    'VM snapshots related to non-existent backups',
  alarmMessage: 'Alarms',
  noAlarms: 'No alarms',
  alarmDate: 'Date',
@@ -5,6 +5,7 @@ import { startsWith } from 'lodash'
import Icon from './icon'
import propTypes from './prop-types-decorator'
import { createGetObject } from './selectors'
import { FormattedDate } from 'react-intl'
import { isSrWritable } from './xo'
import { connectStore, formatSize } from './utils'

@@ -203,10 +204,29 @@ const xoItemToRender = {
        : group.name_label}
    </span>
  ),

  backup: backup => (
    <span>
      <span className='tag tag-info' style={{ textTransform: 'capitalize' }}>
        {backup.mode}
      </span>{' '}
      <span className='tag tag-warning'>{backup.remote.name}</span>{' '}
      <FormattedDate
        value={new Date(backup.timestamp)}
        month='long'
        day='numeric'
        year='numeric'
        hour='2-digit'
        minute='2-digit'
        second='2-digit'
      />
    </span>
  ),
}

const renderXoItem = (item, { className } = {}) => {
  const { id, type, label } = item
const renderXoItem = (item, { className, type: xoType } = {}) => {
  const { id, label } = item
  const type = xoType || item.type

  if (item.removed) {
    return (
@@ -245,6 +265,9 @@ const renderXoItem = (item, { className } = {}) => {

export { renderXoItem as default }

export const getRenderXoItemOfType = type => (item, options = {}) =>
  renderXoItem(item, { ...options, type })

const GenericXoItem = connectStore(() => {
  const getObject = createGetObject()
@@ -495,7 +495,8 @@ export const createDoesHostNeedRestart = hostSelector => {
      )
    )
    .find([
      ({ guidance }) =>
      ({ guidance, upgrade }) =>
        upgrade ||
        find(
          guidance,
          action => action === 'restartHost' || action === 'restartXapi'
@@ -1,10 +1,22 @@
import ChartistGraph from 'react-chartist'
import ChartistLegend from 'chartist-plugin-legend'
import ChartistTooltip from 'chartist-plugin-tooltip'
import humanFormat from 'human-format'
import React from 'react'
import { injectIntl } from 'react-intl'
import { messages } from 'intl'
import { find, flatten, floor, get, map, max, size, sum, values } from 'lodash'
import {
  find,
  flatten,
  floor,
  get,
  map,
  max,
  round,
  size,
  sum,
  values,
} from 'lodash'

import propTypes from '../prop-types-decorator'
import { computeArraysSum } from '../xo-stats'
@@ -87,9 +99,9 @@ const makeLabelInterpolationFnc = (intl, nValues, endTimestamp, interval) => {
  return (value, index) =>
    index % labelSpace === 0
      ? intl.formatTime(
          (endTimestamp - (nValues - index - 1) * interval) * 1000,
          format
        )
        (endTimestamp - (nValues - index - 1) * interval) * 1000,
        format
      )
      : null
}

@@ -441,19 +453,19 @@ export const PoolPifLineChart = injectIntl(

    const series = addSumSeries
      ? map(ios, io => ({
          name: `${intl.formatMessage(messages.poolAllHosts)} (${io})`,
          data: computeArraysSum(
            map(data, ({ stats }) => computeArraysSum(stats.pifs[io]))
          ),
        }))
        name: `${intl.formatMessage(messages.poolAllHosts)} (${io})`,
        data: computeArraysSum(
          map(data, ({ stats }) => computeArraysSum(stats.pifs[io]))
        ),
      }))
      : flatten(
          map(data, ({ stats, host }) =>
            map(ios, io => ({
              name: `${host} (${io})`,
              data: computeArraysSum(stats.pifs[io]),
            }))
        map(data, ({ stats, host }) =>
          map(ios, io => ({
            name: `${host} (${io})`,
            data: computeArraysSum(stats.pifs[io]),
          }))
        )
      )
    )

    return (
      <ChartistGraph
@@ -604,7 +616,11 @@ export const IopsLineChart = injectIntl(
          nValues: length,
          endTimestamp,
          interval,
          valueTransform: value => `${value.toPrecision(3)} /s`,
          valueTransform: value =>
            humanFormat(value, {
              decimals: 3,
              unit: 'IOPS',
            }),
        }),
        ...options,
      }}
@@ -721,7 +737,7 @@ export const IowaitChart = injectIntl(
          nValues: length,
          endTimestamp,
          interval,
          valueTransform: value => `${value.toPrecision(2)}%`,
          valueTransform: value => `${round(value, 2)}%`,
        }),
        ...options,
      }}
@@ -1918,6 +1918,22 @@ export const fetchFiles = (remote, disk, partition, paths, format) =>
    window.location = `.${url}`
  })

// File restore NG ----------------------------------------------------

export const listPartitions = (remote, disk) =>
  _call('backupNg.listPartitions', resolveIds({ remote, disk }))

export const listFiles = (remote, disk, path, partition) =>
  _call('backupNg.listFiles', resolveIds({ remote, disk, path, partition }))

export const fetchFilesNg = (remote, disk, partition, paths, format) =>
  _call(
    'backupNg.fetchFiles',
    resolveIds({ remote, disk, partition, paths, format })
  ).then(({ $getFrom: url }) => {
    window.location = `.${url}`
  })

// -------------------------------------------------------------------

export const probeSrNfs = (host, server) =>
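The three backupNg calls added above mirror the legacy file-restore flow: list the partitions of a backed-up disk, browse a partition, then fetch the selected paths as an archive. A hedged usage sketch (the IDs are placeholders, and listPartitions is assumed to resolve to an array of partition descriptors; every argument goes through resolveIds, so objects or plain IDs both work):

const partitions = await listPartitions('remoteId', 'diskId')
const files = await listFiles('remoteId', 'diskId', '/etc/', partitions[0])
// Navigates to the $getFrom URL returned by the server, which triggers
// a browser download of the selected paths as a ZIP archive.
await fetchFilesNg('remoteId', 'diskId', partitions[0], ['/etc/hosts'], 'zip')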
@@ -41,7 +41,7 @@ export function exposeTrial (trial) {

export function blockXoaAccess (xoaState) {
  let block = xoaState.state === 'untrustedTrial'
  if (process.env.XOA_PLAN > 1 && process.env.XOA_PLAN < 5) {
  if (process.env.XOA_PLAN <= 1 || process.env.XOA_PLAN >= 5) {
    block = block || xoaState.state === 'ERROR'
  }
  return block
@@ -1,8 +1,233 @@
import _ from 'intl'
import ActionButton from 'action-button'
import Component from 'base-component'
import Icon from 'icon'
import React from 'react'
import SortedTable from 'sorted-table'
import Upgrade from 'xoa-upgrade'
import { addSubscriptions, noop } from 'utils'
import { confirm } from 'modal'
import { error } from 'notification'
import { FormattedDate } from 'react-intl'
import {
  deleteBackups,
  fetchFilesNg as fetchFiles,
  listVmBackups,
  subscribeRemotes,
} from 'xo'
import {
  assign,
  filter,
  flatMap,
  forEach,
  keyBy,
  map,
  reduce,
  toArray,
} from 'lodash'

import DeleteBackupsModalBody from '../restore/delete-backups-modal-body'
import RestoreFileModalBody from './restore-file-modal'

// -----------------------------------------------------------------------------

const BACKUPS_COLUMNS = [
  {
    name: _('backupVmNameColumn'),
    itemRenderer: ({ last }) => last.vm.name_label,
    sortCriteria: 'last.vm.name_label',
  },
  {
    name: _('backupVmDescriptionColumn'),
    itemRenderer: ({ last }) => last.vm.name_description,
    sortCriteria: 'last.vm.name_description',
  },
  {
    name: _('firstBackupColumn'),
    itemRenderer: ({ first }) => (
      <FormattedDate
        value={new Date(first.timestamp)}
        month='long'
        day='numeric'
        year='numeric'
        hour='2-digit'
        minute='2-digit'
        second='2-digit'
      />
    ),
    sortCriteria: 'first.timestamp',
    sortOrder: 'desc',
  },
  {
    name: _('lastBackupColumn'),
    itemRenderer: ({ last }) => (
      <FormattedDate
        value={new Date(last.timestamp)}
        month='long'
        day='numeric'
        year='numeric'
        hour='2-digit'
        minute='2-digit'
        second='2-digit'
      />
    ),
    sortCriteria: 'last.timestamp',
    default: true,
    sortOrder: 'desc',
  },
  {
    name: _('availableBackupsColumn'),
    itemRenderer: ({ count }) => count,
    sortCriteria: 'count',
  },
]

// -----------------------------------------------------------------------------

@addSubscriptions({
  remotes: subscribeRemotes,
})
export default class Restore extends Component {
  state = {
    backupDataByVm: {},
  }

  componentWillReceiveProps (props) {
    if (props.remotes !== this.props.remotes) {
      this._refreshBackupList(props.remotes)
    }
  }

  _refreshBackupList = async (_ = this.props.remotes) => {
    const remotes = keyBy(filter(_, { enabled: true }), 'id')
    const backupsByRemote = await listVmBackups(toArray(remotes))

    const backupDataByVm = {}
    forEach(backupsByRemote, (backups, remoteId) => {
      const remote = remotes[remoteId]
      forEach(backups, (vmBackups, vmId) => {
        vmBackups = filter(vmBackups, { mode: 'delta' })
        if (vmBackups.length === 0) {
          return
        }
        if (backupDataByVm[vmId] === undefined) {
          backupDataByVm[vmId] = { backups: [] }
        }

        backupDataByVm[vmId].backups.push(
          ...map(vmBackups, bkp => ({ ...bkp, remote }))
        )
      })
    })
    let first, last
    forEach(backupDataByVm, (data, vmId) => {
      first = { timestamp: Infinity }
      last = { timestamp: 0 }
      let count = 0 // Number since there's only 1 mode in file restore
      forEach(data.backups, backup => {
        if (backup.timestamp > last.timestamp) {
          last = backup
        }
        if (backup.timestamp < first.timestamp) {
          first = backup
        }
        count++
      })

      assign(data, { first, last, count, id: vmId })
    })
    this.setState({ backupDataByVm })
  }

  // Actions -------------------------------------------------------------------

  _restore = ({ backups, last }) =>
    confirm({
      title: _('restoreFilesFromBackup', { name: last.vm.name_label }),
      body: (
        <RestoreFileModalBody vmName={last.vm.name_label} backups={backups} />
      ),
    }).then(({ remote, disk, partition, paths, format }) => {
      if (
        remote === undefined ||
        disk === undefined ||
        paths === undefined ||
        paths.length === 0
      ) {
        return error(_('restoreFiles'), _('restoreFilesError'))
      }
      return fetchFiles(remote, disk, partition, paths, format)
    }, noop)

  _delete = data =>
    confirm({
      title: _('deleteVmBackupsTitle', { vm: data.last.vm.name_label }),
      body: <DeleteBackupsModalBody backups={data.backups} />,
      icon: 'delete',
    })
      .then(deleteBackups, noop)
      .then(() => this._refreshBackupList())

  _bulkDelete = datas =>
    confirm({
      title: _('deleteVmBackupsBulkTitle'),
      body: <p>{_('deleteVmBackupsBulkMessage', { nVms: datas.length })}</p>,
      icon: 'delete',
      strongConfirm: {
        messageId: 'deleteVmBackupsBulkConfirmText',
        values: {
          nBackups: reduce(datas, (sum, data) => sum + data.backups.length, 0),
        },
      },
    })
      .then(() => deleteBackups(flatMap(datas, 'backups')), noop)
      .then(() => this._refreshBackupList())

  // ---------------------------------------------------------------------------

  _actions = [
    {
      handler: this._bulkDelete,
      icon: 'delete',
      individualHandler: this._delete,
      label: _('deleteVmBackups'),
      level: 'danger',
    },
  ]

  _individualActions = [
    {
      handler: this._restore,
      icon: 'restore',
      label: _('restoreVmBackups'),
      level: 'primary',
    },
  ]

export default class FileRestore extends Component {
  render () {
    return <p className='text-danger'>Available soon</p>
    return (
      <Upgrade place='restoreBackup' available={2}>
        <div>
          <div className='mb-1'>
            <ActionButton
              btnStyle='primary'
              handler={this._refreshBackupList}
              icon='refresh'
            >
              {_('restoreResfreshList')}
            </ActionButton>
          </div>
          <em>
            <Icon icon='info' /> {_('restoreDeltaBackupsInfo')}
          </em>
          <SortedTable
            actions={this._actions}
            collection={this.state.backupDataByVm}
            columns={BACKUPS_COLUMNS}
            individualActions={this._individualActions}
          />
        </div>
      </Upgrade>
    )
  }
}
@@ -0,0 +1,379 @@
import _ from 'intl'
import ActionButton from 'action-button'
import Component from 'base-component'
import endsWith from 'lodash/endsWith'
import Icon from 'icon'
import React from 'react'
import replace from 'lodash/replace'
import Select from 'form/select'
import Tooltip from 'tooltip'
import { Container, Col, Row } from 'grid'
import { createSelector } from 'reselect'
import { formatSize } from 'utils'
import { filter, includes, isEmpty, map } from 'lodash'
import { getRenderXoItemOfType } from 'render-xo-item'
import { listPartitions, listFiles } from 'xo'

const BACKUP_RENDERER = getRenderXoItemOfType('backup')

const partitionOptionRenderer = partition => (
  <span>
    {partition.name} {partition.type}{' '}
    {partition.size && `(${formatSize(+partition.size)})`}
  </span>
)

const diskOptionRenderer = disk => <span>{disk.name}</span>

const fileOptionRenderer = file => <span>{file.name}</span>

const formatFilesOptions = (rawFiles, path) => {
  const files =
    path !== '/'
      ? [
          {
            name: '..',
            id: '..',
            path: getParentPath(path),
            content: {},
          },
        ]
      : []

  return files.concat(
    map(rawFiles, (file, name) => ({
      name,
      id: `${path}${name}`,
      path: `${path}${name}`,
      content: file,
    }))
  )
}

const getParentPath = path => replace(path, /^(\/+.+)*(\/+.+)/, '$1/')

// -----------------------------------------------------------------------------

export default class RestoreFileModalBody extends Component {
  state = {
    format: 'zip',
  }

  get value () {
    const { state } = this

    return {
      disk: state.disk,
      format: state.format,
      partition: state.partition,
      paths: state.selectedFiles && map(state.selectedFiles, 'path'),
      remote: state.backup.remote.id,
    }
  }

  _listFiles = () => {
    const { backup, disk, partition, path } = this.state
    this.setState({ scanningFiles: true })

    return listFiles(backup.remote.id, disk, path, partition).then(
      rawFiles =>
        this.setState({
          files: formatFilesOptions(rawFiles, path),
          scanningFiles: false,
          listFilesError: false,
        }),
      error => {
        this.setState({
          scanningFiles: false,
          listFilesError: true,
        })
        throw error
      }
    )
  }

  _getSelectableFiles = createSelector(
    () => this.state.files,
    () => this.state.selectedFiles,
    (available, selected) =>
      filter(available, file => !includes(selected, file))
  )

  _onBackupChange = backup => {
    this.setState({
      backup,
      disk: undefined,
      partition: undefined,
      file: undefined,
      selectedFiles: undefined,
      scanDiskError: false,
      listFilesError: false,
    })
  }

  _onDiskChange = disk => {
    this.setState({
      partition: undefined,
      file: undefined,
      selectedFiles: undefined,
      scanDiskError: false,
      listFilesError: false,
    })

    if (!disk) {
      return
    }

    listPartitions(this.state.backup.remote.id, disk).then(
      partitions => {
        if (isEmpty(partitions)) {
          this.setState(
            {
              disk,
              path: '/',
            },
            this._listFiles
          )

          return
        }

        this.setState({
          disk,
          partitions,
        })
      },
      error => {
        this.setState({
          disk,
          scanDiskError: true,
        })
        throw error
      }
    )
  }

  _onPartitionChange = partition => {
    this.setState(
      {
        partition,
        path: '/',
        file: undefined,
        selectedFiles: undefined,
      },
      partition && this._listFiles
    )
  }

  _onFileChange = file => {
    if (file == null) {
      return
    }

    // Ugly workaround to keep the ReactSelect open after selecting a folder
    // FIXME: Remove once something better is implemented in react-select:
    // https://github.com/JedWatson/react-select/issues/1989
    const select = document.activeElement
    select.blur()
    select.focus()

    const isFile = file.id !== '..' && !endsWith(file.path, '/')
    if (isFile) {
      const { selectedFiles } = this.state
      if (!includes(selectedFiles, file)) {
        this.setState({
          selectedFiles: (selectedFiles || []).concat(file),
        })
      }
    } else {
      this.setState(
        {
          path: file.id === '..' ? getParentPath(this.state.path) : file.path,
        },
        this._listFiles
      )
    }
  }

  _unselectFile = file => {
    this.setState({
      selectedFiles: filter(
        this.state.selectedFiles,
        ({ id }) => id !== file.id
      ),
    })
  }

  _unselectAllFiles = () => {
    this.setState({
      selectedFiles: undefined,
    })
  }

  _selectAllFolderFiles = () => {
    this.setState({
      selectedFiles: (this.state.selectedFiles || []).concat(
        filter(this._getSelectableFiles(), ({ path }) => !endsWith(path, '/'))
      ),
    })
  }

  // ---------------------------------------------------------------------------

  render () {
    const { backups } = this.props
    const {
      backup,
      disk,
      format,
      partition,
      partitions,
      path,
      scanDiskError,
      listFilesError,
      scanningFiles,
      selectedFiles,
    } = this.state
    const noPartitions = isEmpty(partitions)

    return (
      <div>
        <Select
          labelKey='name'
          onChange={this._onBackupChange}
          optionRenderer={BACKUP_RENDERER}
          options={backups}
          placeholder={_('restoreFilesSelectBackup')}
          value={backup}
          valueKey='id'
        />
        {backup && [
          <br />,
          <Select
            labelKey='name'
            onChange={this._onDiskChange}
            optionRenderer={diskOptionRenderer}
            options={backup.disks}
            placeholder={_('restoreFilesSelectDisk')}
            value={disk}
            valueKey='id'
          />,
        ]}
        {scanDiskError && (
          <span>
            <Icon icon='error' /> {_('restoreFilesDiskError')}
          </span>
        )}
        {disk &&
          !scanDiskError &&
          !noPartitions && [
            <br />,
            <Select
              labelKey='name'
              onChange={this._onPartitionChange}
              optionRenderer={partitionOptionRenderer}
              options={partitions}
              placeholder={_('restoreFilesSelectPartition')}
              value={partition}
              valueKey='id'
            />,
          ]}
        {(partition || (disk && !scanDiskError && noPartitions)) && [
          <br />,
          <Container>
            <Row>
              <Col size={10}>
                <pre>
                  {path} {scanningFiles && <Icon icon='loading' />}
                  {listFilesError && <Icon icon='error' />}
                </pre>
              </Col>
              <Col size={2}>
                <span className='pull-right'>
                  <Tooltip content={_('restoreFilesSelectAllFiles')}>
                    <ActionButton
                      handler={this._selectAllFolderFiles}
                      icon='add'
                      size='small'
                    />
                  </Tooltip>
                </span>
              </Col>
            </Row>
          </Container>,
          <Select
            labelKey='name'
            onChange={this._onFileChange}
            optionRenderer={fileOptionRenderer}
            options={this._getSelectableFiles()}
            placeholder={_('restoreFilesSelectFiles')}
            value={null}
            valueKey='id'
          />,
          <br />,
          <div>
            <span className='mr-1'>
              <input
                checked={format === 'zip'}
                name='format'
                onChange={this.linkState('format')}
                type='radio'
                value='zip'
              />{' '}
              ZIP
            </span>
            <span>
              <input
                checked={format === 'tar'}
                name='format'
                onChange={this.linkState('format')}
                type='radio'
                value='tar'
              />{' '}
              TAR
            </span>
          </div>,
          <br />,
          selectedFiles && selectedFiles.length ? (
            <Container>
              <Row>
                <Col className='pl-0 pb-1' size={10}>
                  <em>
                    {_('restoreFilesSelectedFiles', {
                      files: selectedFiles.length,
                    })}
                  </em>
                </Col>
                <Col size={2} className='text-xs-right'>
                  <ActionButton
                    handler={this._unselectAllFiles}
                    icon='remove'
                    size='small'
                    tooltip={_('restoreFilesUnselectAll')}
                  />
                </Col>
              </Row>
              {map(selectedFiles, file => (
                <Row key={file.id}>
                  <Col size={10}>
                    <pre>{file.path}</pre>
                  </Col>
                  <Col size={2} className='text-xs-right'>
                    <ActionButton
                      handler={this._unselectFile}
                      handlerParam={file}
                      icon='remove'
                      size='small'
                    />
                  </Col>
                </Row>
              ))}
            </Container>
          ) : (
            <em>{_('restoreFilesNoFilesSelected')}</em>
          ),
        ]}
      </div>
    )
  }
}
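The getParentPath regex in the modal above drives its '..' navigation entry: it drops the last path segment and keeps a trailing slash. Its behaviour on the folder paths the modal actually manipulates (values worked out by hand, not taken from a test suite):

getParentPath('/var/log/') // → '/var/'
getParentPath('/var/') // → '/' (the first group matches nothing, so only '/' remains)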
@@ -184,7 +184,7 @@ const HEADER = (
      </Col>
      <Col mediumSize={9}>
        <NavTabs className='pull-right'>
          <NavLink exact to='/backup-ng'>
          <NavLink exact to='/backup-ng/overview'>
            <Icon icon='menu-backup-overview' /> {_('backupOverviewPage')}
          </NavLink>
          <NavLink to='/backup-ng/new'>
@@ -203,9 +203,10 @@ const HEADER = (
  </Container>
)

export default routes(Overview, {
export default routes('overview', {
  ':id/edit': Edit,
  new: New,
  overview: Overview,
  restore: Restore,
  'file-restore': FileRestore,
})(({ children }) => (
@@ -2,10 +2,12 @@ import _ from 'intl'
import classNames from 'classnames'
import Component from 'base-component'
import React from 'react'
import { FormattedDate } from 'react-intl'
import { forEach, map, orderBy } from 'lodash'
import { createFilter, createSelector } from 'selectors'
import { Toggle } from 'form'
import { getRenderXoItemOfType } from 'render-xo-item'

const BACKUP_RENDERER = getRenderXoItemOfType('backup')

const _escapeDot = id => id.replace('.', '\0')

@@ -61,22 +63,7 @@ export default class DeleteBackupsModalBody extends Component {
            onClick={this.toggleState(_escapeDot(backup.id))}
            type='button'
          >
            <span
              className='tag tag-info'
              style={{ textTransform: 'capitalize' }}
            >
              {backup.mode}
            </span>{' '}
            <span className='tag tag-warning'>{backup.remote.name}</span>{' '}
            <FormattedDate
              value={new Date(backup.timestamp)}
              month='long'
              day='numeric'
              year='numeric'
              hour='2-digit'
              minute='2-digit'
              second='2-digit'
            />
            {BACKUP_RENDERER(backup)}
          </button>
        ))}
      </div>
@@ -2,9 +2,11 @@ import _ from 'intl'
import React from 'react'
import Component from 'base-component'
import StateButton from 'state-button'
import { getRenderXoItemOfType } from 'render-xo-item'
import { Select, Toggle } from 'form'
import { SelectSr } from 'select-objects'
import { FormattedDate } from 'react-intl'

const BACKUP_RENDERER = getRenderXoItemOfType('backup')

export default class RestoreBackupsModalBody extends Component {
  get value () {
@@ -15,26 +17,7 @@ export default class RestoreBackupsModalBody extends Component {
      <div>
        <div className='mb-1'>
          <Select
            optionRenderer={backup => (
              <span>
                <span
                  className='tag tag-info'
                  style={{ textTransform: 'capitalize' }}
                >
                  {backup.mode}
                </span>{' '}
                <span className='tag tag-warning'>{backup.remote.name}</span>{' '}
                <FormattedDate
                  value={new Date(backup.timestamp)}
                  month='long'
                  day='numeric'
                  year='numeric'
                  hour='2-digit'
                  minute='2-digit'
                  second='2-digit'
                />
              </span>
            )}
            optionRenderer={BACKUP_RENDERER}
            options={this.props.data.backups}
            onChange={this.linkState('backup')}
            placeholder={_('importBackupModalSelectBackup')}
@@ -10,14 +10,20 @@ import TabButton from 'tab-button'
import Tooltip from 'tooltip'
import Upgrade from 'xoa-upgrade'
import xml2js from 'xml2js'
import { Card, CardHeader, CardBlock } from 'card'
import { confirm } from 'modal'
import { connectStore, formatSize, noop, resolveIds } from 'utils'
import { Container, Row, Col } from 'grid'
import { flatten, get, includes, isEmpty, map, mapValues } from 'lodash'
import { FormattedRelative, FormattedTime } from 'react-intl'
import { fromCallback } from 'promise-toolbox'
import { SelectPool } from 'select-objects'
import { fromCallback } from 'promise-toolbox'
import { Container, Row, Col } from 'grid'
import { Card, CardHeader, CardBlock } from 'card'
import { FormattedRelative, FormattedTime } from 'react-intl'
import { flatten, get, includes, isEmpty, map, mapValues } from 'lodash'
import {
  addSubscriptions,
  connectStore,
  formatSize,
  noop,
  resolveIds,
} from 'utils'
import {
  deleteMessage,
  deleteOrphanedVdis,
@@ -26,6 +32,7 @@ import {
  deleteVdi,
  deleteVm,
  isSrWritable,
  subscribeSchedules,
} from 'xo'
import {
  areObjectsFetched,
@@ -383,6 +390,9 @@ const ALARM_COLUMNS = [
  },
]

@addSubscriptions({
  schedules: subscribeSchedules,
})
@connectStore(() => {
  const getOrphanVdiSnapshots = createGetObjectsOfType('VDI-snapshot')
    .filter([_ => !_.$snapshot_of && _.$VBDs.length === 0])
@@ -390,6 +400,15 @@ const ALARM_COLUMNS = [
  const getOrphanVmSnapshots = createGetObjectsOfType('VM-snapshot')
    .filter([snapshot => !snapshot.$snapshot_of])
    .sort()
  const getLoneBackupSnapshots = createGetObjectsOfType('VM-snapshot').filter(
    createSelector(
      createCollectionWrapper((_, props) => map(props.schedules, 'id')),
      scheduleIds => _ => {
        const scheduleId = _.other['xo:backup:schedule']
        return scheduleId !== undefined && !includes(scheduleIds, scheduleId)
      }
    )
  )
  const getUserSrs = createGetObjectsOfType('SR').filter([isSrWritable])
  const getVdiSrs = createGetObjectsOfType('SR').pick(
    createSelector(getOrphanVdiSnapshots, snapshots => map(snapshots, '$SR'))
@@ -405,6 +424,7 @@ const ALARM_COLUMNS = [
    vdiOrphaned: getOrphanVdiSnapshots,
    vdiSr: getVdiSrs,
    vmOrphaned: getOrphanVmSnapshots,
    vmBackupSnapshots: getLoneBackupSnapshots,
  }
})
export default class Health extends Component {
@@ -490,6 +510,11 @@ export default class Health extends Component {
    this._getPoolPredicate
  )

  _getVmBackupSnapshots = createFilter(
    () => this.props.vmBackupSnapshots,
    this._getPoolPredicate
  )

  _getAlertMessages = createFilter(
    () => this.props.alertMessages,
    this._getPoolPredicate
@@ -610,6 +635,24 @@ export default class Health extends Component {
          </Card>
        </Col>
      </Row>
      <Row className='snapshot-vms'>
        <Col>
          <Card>
            <CardHeader>
              <Icon icon='vm' /> {_('vmSnapshotsRelatedToNonExistentBackups')}
            </CardHeader>
            <CardBlock>
              <NoObjects
                collection={this._getVmBackupSnapshots()}
                columns={VM_COLUMNS}
                component={SortedTable}
                emptyMessage={_('noSnapshots')}
                shortcutsTarget='.snapshot-vms'
              />
            </CardBlock>
          </Card>
        </Col>
      </Row>
      <Row>
        <Col>
          <Card>
@@ -246,6 +246,13 @@ export default class Host extends Component {
              }
            />{' '}
            <Text value={host.name_label} onChange={this._setNameLabel} />
            {this.props.needsRestart && (
              <Tooltip content={_('rebootUpdateHostLabel')}>
                <Link to={`/hosts/${host.id}/patches`}>
                  <Icon icon='alarm' />
                </Link>
              </Tooltip>
            )}
          </h2>
          <span>
            <Text
@@ -291,12 +298,6 @@ export default class Host extends Component {
                {missingPatches.length}
              </span>
            )}
            {this.props.needsRestart &&
              isEmpty(missingPatches) && (
                <Tooltip content={_('rebootUpdateHostLabel')}>
                  <Icon icon='alarm' />
                </Tooltip>
              )}
          </NavLink>
          <NavLink to={`/hosts/${host.id}/logs`}>
            {_('logsTabName')}
@@ -181,16 +181,15 @@ export default class HostPatches extends Component {
      <Container>
        <Row>
          <Col className='text-xs-right'>
            {this.props.needsRestart &&
              isEmpty(missingPatches) && (
                <TabButton
                  btnStyle='warning'
                  handler={restartHost}
                  handlerParam={host}
                  icon='host-reboot'
                  labelId='rebootUpdateHostLabel'
                />
              )}
            {this.props.needsRestart && (
              <TabButton
                btnStyle='warning'
                handler={restartHost}
                handlerParam={host}
                icon='host-reboot'
                labelId='rebootUpdateHostLabel'
              />
            )}
            <TabButton
              disabled={!hasMissingPatches}
              btnStyle={hasMissingPatches ? 'primary' : undefined}
@@ -203,9 +203,31 @@ export default class Menu extends Component {
        ],
      },
      isAdmin && {
        to: '/backup-ng',
        to: '/backup-ng/overview',
        icon: 'menu-backup',
        label: ['Backup NG'],
        label: <span>Backup NG</span>,
        subMenu: [
          {
            to: '/backup-ng/overview',
            icon: 'menu-backup-overview',
            label: 'backupOverviewPage',
          },
          {
            to: '/backup-ng/new',
            icon: 'menu-backup-new',
            label: 'backupNewPage',
          },
          {
            to: '/backup-ng/restore',
            icon: 'menu-backup-restore',
            label: 'backupRestorePage',
          },
          {
            to: '/backup-ng/file-restore',
            icon: 'menu-backup-file-restore',
            label: 'backupFileRestorePage',
          },
        ],
      },
      isAdmin && {
        to: 'xoa/update',
@@ -14,20 +14,25 @@ import { flowRight, isEmpty, keys, sum, values } from 'lodash'

const COLUMNS = [
  {
    itemRenderer: _ => <span>{_.name_label}</span>,
    name: _('srUnhealthyVdiNameLabel'),
    itemRenderer: vdi => <span>{vdi.name_label}</span>,
    sortCriteria: vdi => vdi.name_label,
    sortCriteria: 'name_label',
  },
  {
    name: _('srUnhealthyVdiSize'),
    itemRenderer: vdi => formatSize(vdi.size),
    sortCriteria: vdi => vdi.size,
    name: _('srUnhealthyVdiSize'),
    sortCriteria: 'size',
  },
  {
    name: _('srUnhealthyVdiDepth'),
    itemRenderer: (vdi, chains) => chains[vdi.uuid],
    name: _('srUnhealthyVdiDepth'),
    sortCriteria: (vdi, chains) => chains[vdi.uuid],
  },
  {
    itemRenderer: _ => <Copiable tagName='div'>{_.uuid}</Copiable>,
    name: _('srUnhealthyVdiUuid'),
    sortCriteria: 'uuid',
  },
]

const UnhealthyVdiChains = flowRight(
@@ -101,6 +101,7 @@ class VmData extends Component {
        return network.id ? network.id : network
      }),
      nCpus: +refs.nCpus.value,
      tables: props.tables,
    }
  }

@@ -166,52 +167,52 @@ class VmData extends Component {
        <Col mediumSize={6}>
          {!isEmpty(disks)
            ? map(disks, (disk, diskId) => (
                <Row key={diskId}>
                  <Col mediumSize={6}>
                    <div className='form-group'>
                      <label>
                        {_('diskInfo', {
                          position: `${disk.position}`,
                          capacity: formatSize(disk.capacity),
                        })}
                      </label>
                      <input
                        className='form-control'
                        ref={`disk-name-${diskId}`}
                        defaultValue={disk.nameLabel}
                        type='text'
                        required
                      />
                    </div>
                  </Col>
                  <Col mediumSize={6}>
                    <div className='form-group'>
                      <label>{_('diskDescription')}</label>
                      <input
                        className='form-control'
                        ref={`disk-description-${diskId}`}
                        defaultValue={disk.descriptionLabel}
                        type='text'
                        required
                      />
                    </div>
                  </Col>
                </Row>
              ))
              <Row key={diskId}>
                <Col mediumSize={6}>
                  <div className='form-group'>
                    <label>
                      {_('diskInfo', {
                        position: `${disk.position}`,
                        capacity: formatSize(disk.capacity),
                      })}
                    </label>
                    <input
                      className='form-control'
                      ref={`disk-name-${diskId}`}
                      defaultValue={disk.nameLabel}
                      type='text'
                      required
                    />
                  </div>
                </Col>
                <Col mediumSize={6}>
                  <div className='form-group'>
                    <label>{_('diskDescription')}</label>
                    <input
                      className='form-control'
                      ref={`disk-description-${diskId}`}
                      defaultValue={disk.descriptionLabel}
                      type='text'
                      required
                    />
                  </div>
                </Col>
              </Row>
            ))
            : _('noDisks')}
        </Col>
        <Col mediumSize={6}>
          {networks.length > 0
            ? map(networks, (name, networkId) => (
                <div className='form-group' key={networkId}>
                  <label>{_('networkInfo', { name })}</label>
                  <SelectNetwork
                    defaultValue={defaultNetwork}
                    ref={`network-${networkId}`}
                    predicate={this._getNetworkPredicate()}
                  />
                </div>
              ))
              <div className='form-group' key={networkId}>
                <label>{_('networkInfo', { name })}</label>
                <SelectNetwork
                  defaultValue={defaultNetwork}
                  ref={`network-${networkId}`}
                  predicate={this._getNetworkPredicate()}
                />
              </div>
            ))
            : _('noNetworks')}
        </Col>
      </Row>

@@ -1,8 +1,9 @@
import find from 'lodash/find'
import forEach from 'lodash/forEach'
import tar from 'tar-stream'
import xml2js from 'xml2js'
import { ensureArray, htmlFileToStream, streamToString } from 'utils'
import { fromEvent } from 'promise-toolbox'
import { ensureArray } from 'utils'
import { readVmdkGrainTable } from 'xo-vmdk-to-vhd'

// ===================================================================

@@ -82,103 +83,119 @@ const filterDisks = disks => {
}

// ===================================================================
/* global FileReader, TextDecoder */

const parseOvaFile = file =>
  new Promise((resolve, reject) => {
    const stream = htmlFileToStream(file)
    const extract = tar.extract()
async function readFileFragment (file, start = 0, end) {
  const reader = new FileReader()
  reader.readAsArrayBuffer(file.slice(start, end))
  return (await fromEvent(reader, 'loadend')).target.result
}

    stream.on('error', reject)
function parseTarHeader (header) {
  const textDecoder = new TextDecoder('ascii')
  const fileName = textDecoder.decode(header.slice(0, 100)).split('\0')[0]
  if (fileName.length === 0) {
    return null
  }
  const fileSize = parseInt(textDecoder.decode(header.slice(124, 124 + 11)), 8)
  return { fileName, fileSize }
}

    // tar module can work with bad tar files...
    // So it's necessary to reject at end of stream.
    extract.on('finish', () => {
      reject(new Error('No ovf file found.'))
    })
    extract.on('error', reject)
    extract.on('entry', ({ name }, stream, cb) => {
      // Not a XML file.
      const extIndex = name.lastIndexOf('.')
      if (extIndex === -1 || name.substring(extIndex + 1) !== 'ovf') {
        stream.on('end', cb)
        stream.resume()
        return
      }
async function parseOVF (fileFragment) {
  const textDecoder = new TextDecoder('utf-8')
  const xmlString = textDecoder.decode(await readFileFragment(fileFragment))
  return new Promise((resolve, reject) =>
    xml2js.parseString(
      xmlString,
      { mergeAttrs: true, explicitArray: false },
      (err, res) => {
        if (err) {
          reject(err)
          return
        }

      // XML file.
      streamToString(stream).then(xmlString => {
        xml2js.parseString(
          xmlString,
          {
            mergeAttrs: true,
            explicitArray: false,
        const {
          Envelope: {
            DiskSection: { Disk: disks },
            References: { File: files },
            VirtualSystem: system,
          },
          (err, res) => {
            if (err) {
              reject(err)
              return
            }
        } = res

            const {
              Envelope: {
                DiskSection: { Disk: disks },
                References: { File: files },
                VirtualSystem: system,
              },
            } = res
        const data = {
          disks: {},
          networks: [],
        }
        const hardware = system.VirtualHardwareSection

            const data = {
              disks: {},
              networks: [],
            }
            const hardware = system.VirtualHardwareSection
        // Get VM name/description.
        data.nameLabel = hardware.System['vssd:VirtualSystemIdentifier']
        data.descriptionLabel =
          (system.AnnotationSection && system.AnnotationSection.Annotation) ||
          (system.OperatingSystemSection &&
            system.OperatingSystemSection.Description)

            // Get VM name/description.
            data.nameLabel = hardware.System['vssd:VirtualSystemIdentifier']
            data.descriptionLabel =
              (system.AnnotationSection &&
                system.AnnotationSection.Annotation) ||
              (system.OperatingSystemSection &&
                system.OperatingSystemSection.Description)
        // Get disks.
        forEach(ensureArray(disks), disk => {
          const file = find(
            ensureArray(files),
            file => file['ovf:id'] === disk['ovf:fileRef']
          )
          const unit = disk['ovf:capacityAllocationUnits']

            // Get disks.
            forEach(ensureArray(disks), disk => {
              const file = find(
                ensureArray(files),
                file => file['ovf:id'] === disk['ovf:fileRef']
              )
              const unit = disk['ovf:capacityAllocationUnits']

              data.disks[disk['ovf:diskId']] = {
                capacity:
                  disk['ovf:capacity'] *
                  ((unit && allocationUnitsToFactor(unit)) || 1),
                path: file && file['ovf:href'],
              }
            })

            // Get hardware info: CPU, RAM, disks, networks...
            forEach(ensureArray(hardware.Item), item => {
              const handler =
                RESOURCE_TYPE_TO_HANDLER[item['rasd:ResourceType']]
              if (!handler) {
                return
              }
              handler(data, item)
            })

            // Remove disks which not have a position.
            // (i.e. no info in hardware.Item section.)
            filterDisks(data.disks)

            // Done!
            resolve(data)
            cb()
          data.disks[disk['ovf:diskId']] = {
            capacity:
              disk['ovf:capacity'] *
              ((unit && allocationUnitsToFactor(unit)) || 1),
            path: file && file['ovf:href'],
          }
          )
        })
      })
    })

        // Get hardware info: CPU, RAM, disks, networks...
        forEach(ensureArray(hardware.Item), item => {
          const handler = RESOURCE_TYPE_TO_HANDLER[item['rasd:ResourceType']]
          if (!handler) {
            return
          }
          handler(data, item)
        })

        // Remove disks which not have a position.
        // (i.e. no info in hardware.Item section.)
        filterDisks(data.disks)
        resolve(data)
      }
    )
  )
}

async function parseTarFile (file) {
  let offset = 0
  const HEADER_SIZE = 512
  let data = { tables: {} }
  while (offset + HEADER_SIZE <= file.size) {
    const header = parseTarHeader(
      await readFileFragment(file, offset, offset + HEADER_SIZE)
    )
    offset += HEADER_SIZE
    if (header === null) {
      break
    }
    if (header.fileName.toLowerCase().endsWith('.ovf')) {
      const res = await parseOVF(file.slice(offset, offset + header.fileSize))
      data = { ...data, ...res }
    }
    if (header.fileName.toLowerCase().endsWith('.vmdk')) {
      const fileSlice = file.slice(offset, offset + header.fileSize)
      const readFile = async (start, end) =>
        readFileFragment(fileSlice, start, end)
      data.tables[header.fileName] = await readVmdkGrainTable(readFile)
    }
    offset += Math.ceil(header.fileSize / 512) * 512
  }
  return data
}

const parseOvaFile = async file => parseTarFile(file)

    stream.pipe(extract)
  })
export { parseOvaFile as default }
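parseTarFile above walks the OVA by hand, since an OVA is a plain tar archive: each 512-byte header stores the file name at offset 0 and the file size as an octal ASCII string at offset 124, and each file body is padded up to the next 512-byte boundary. A minimal sketch of that offset arithmetic, mirroring the loop above (nextEntryOffset is a hypothetical helper):

// Offset of the next tar header, given the current one and the entry size.
function nextEntryOffset (headerOffset, fileSize) {
  const HEADER_SIZE = 512
  const paddedBody = Math.ceil(fileSize / 512) * 512 // body rounded up to 512
  return headerOffset + HEADER_SIZE + paddedBody
}
// e.g. a 600-byte entry at offset 0: nextEntryOffset(0, 600) → 512 + 1024 = 1536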
206 yarn.lock
@@ -501,6 +501,13 @@
|
||||
dependencies:
|
||||
regenerator-transform "^0.12.3"
|
||||
|
||||
"@babel/plugin-transform-runtime@^7.0.0-beta.44":
|
||||
version "7.0.0-beta.44"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.0.0-beta.44.tgz#13c7289c393425cc3bc99c9a0e836ca45f014c1f"
|
||||
dependencies:
|
||||
"@babel/helper-module-imports" "7.0.0-beta.44"
|
||||
"@babel/helper-plugin-utils" "7.0.0-beta.44"
|
||||
|
||||
"@babel/plugin-transform-shorthand-properties@7.0.0-beta.44":
|
||||
version "7.0.0-beta.44"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.0.0-beta.44.tgz#42e2a31aaa5edf479adaf4c2b677cd3457c99991"
|
||||
@@ -611,7 +618,7 @@
|
||||
pirates "^3.0.1"
|
||||
source-map-support "^0.4.2"
|
||||
|
||||
"@babel/runtime@^7.0.0-beta.39", "@babel/runtime@^7.0.0-beta.42":
|
||||
"@babel/runtime@^7.0.0-beta.39", "@babel/runtime@^7.0.0-beta.42", "@babel/runtime@^7.0.0-beta.44":
|
||||
version "7.0.0-beta.44"
|
||||
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.0.0-beta.44.tgz#ea5ad6c6fe9a2c1187b025bf42424d28050ee696"
|
||||
dependencies:
|
||||
@@ -671,7 +678,7 @@
|
||||
version "0.1.0"
|
||||
resolved "https://registry.yarnpkg.com/@julien-f/freactal/-/freactal-0.1.0.tgz#c3c97c1574ed82de6989f7f3c6110f34b0da3866"
|
||||
|
||||
"@marsaud/smb2-promise@^0.2.0", "@marsaud/smb2-promise@^0.2.1":
|
||||
"@marsaud/smb2-promise@^0.2.1":
|
||||
version "0.2.1"
|
||||
resolved "https://registry.yarnpkg.com/@marsaud/smb2-promise/-/smb2-promise-0.2.1.tgz#fee95f4baba6e4d930e8460d3377aa12560e0f0e"
|
||||
dependencies:
|
||||
@@ -701,20 +708,6 @@
|
||||
dependencies:
|
||||
pako "^1.0.3"
|
||||
|
||||
"@nraynaud/xo-fs@^0.0.5":
|
||||
version "0.0.5"
|
||||
resolved "https://registry.yarnpkg.com/@nraynaud/xo-fs/-/xo-fs-0.0.5.tgz#0f8c525440909223904b6841a37f4d255baa54b3"
|
||||
dependencies:
|
||||
"@marsaud/smb2-promise" "^0.2.0"
|
||||
event-to-promise "^0.7.0"
|
||||
execa "^0.4.0"
|
||||
fs-promise "^0.4.1"
|
||||
get-stream "^2.1.0"
|
||||
lodash "^4.13.1"
|
||||
promise-toolbox "^0.5.0"
|
||||
through2 "^2.0.0"
|
||||
xo-remote-parser "^0.3"
|
||||
|
||||
"@sindresorhus/is@^0.7.0":
|
||||
version "0.7.0"
|
||||
resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd"
|
||||
@@ -1260,6 +1253,13 @@ async-iterator-to-stream@^1.0.1:
|
||||
"@babel/runtime" "^7.0.0-beta.42"
|
||||
readable-stream "^2.3.5"
|
||||
|
||||
async-iterator-to-stream@^1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/async-iterator-to-stream/-/async-iterator-to-stream-1.0.2.tgz#7d3497e441dd2997b7e3ef15f4c367a979692543"
|
||||
dependencies:
|
||||
"@babel/runtime" "^7.0.0-beta.42"
|
||||
readable-stream "^2.3.5"
|
||||
|
||||
async-limiter@~1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.0.tgz#78faed8c3d074ab81f22b4e985d79e8738f720f8"
|
||||
@@ -2424,10 +2424,6 @@ browser-resolve@^1.11.0, browser-resolve@^1.11.2, browser-resolve@^1.7.0:
|
||||
dependencies:
|
||||
resolve "1.1.7"
|
||||
|
||||
browser-stdout@1.3.0:
|
||||
version "1.3.0"
|
||||
resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.0.tgz#f351d32969d32fa5d7a5567154263d928ae3bd1f"
|
||||
|
||||
browser-unpack@^1.1.0:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/browser-unpack/-/browser-unpack-1.2.0.tgz#357aee31fc467831684d063e4355e070a782970d"
|
||||
@@ -2566,7 +2562,7 @@ buffer-alloc-unsafe@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.0.0.tgz#474aa88f34e7bc75fa311d2e6457409c5846c3fe"
|
||||
|
||||
buffer-crc32@^0.2.1, buffer-crc32@^0.2.13:
|
||||
buffer-crc32@^0.2.1, buffer-crc32@^0.2.13, buffer-crc32@~0.2.3:
|
||||
version "0.2.13"
|
||||
resolved "https://registry.yarnpkg.com/buffer-crc32/-/buffer-crc32-0.2.13.tgz#0d333e3f00eac50aa1454abd30ef8c2a5d9a7242"
|
||||
|
||||
@@ -3038,10 +3034,6 @@ commander@2, commander@2.15.x, commander@^2.11.0, commander@^2.8.1, commander@^2
|
||||
version "2.15.1"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-2.15.1.tgz#df46e867d0fc2aec66a34662b406a9ccafff5b0f"
|
||||
|
||||
commander@2.11.0:
|
||||
version "2.11.0"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-2.11.0.tgz#157152fd1e7a6c8d98a5b715cf376df928004563"
|
||||
|
||||
commander@~2.13.0:
|
||||
version "2.13.0"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"
|
||||
@@ -3240,13 +3232,6 @@ cross-env@^5.1.1, cross-env@^5.1.3:
|
||||
cross-spawn "^5.1.0"
|
||||
is-windows "^1.0.0"
|
||||
|
||||
cross-spawn-async@^2.1.1:
|
||||
version "2.2.5"
|
||||
resolved "https://registry.yarnpkg.com/cross-spawn-async/-/cross-spawn-async-2.2.5.tgz#845ff0c0834a3ded9d160daca6d390906bb288cc"
|
||||
dependencies:
|
||||
lru-cache "^4.0.0"
|
||||
which "^1.2.8"
|
||||
|
||||
cross-spawn@^3.0.0:
|
||||
version "3.0.1"
|
||||
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-3.0.1.tgz#1256037ecb9f0c5f79e3d6ef135e30770184b982"
|
||||
@@ -3646,7 +3631,7 @@ debug@2, debug@2.6.9, debug@^2.1.0, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8, de
|
||||
dependencies:
|
||||
ms "2.0.0"
|
||||
|
||||
debug@3.1.0, debug@3.X, debug@^3.1.0:
|
||||
debug@3.X, debug@^3.1.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261"
|
||||
dependencies:
|
||||
@@ -3846,10 +3831,6 @@ detective@^5.0.2:
|
||||
defined "^1.0.0"
|
||||
minimist "^1.1.1"
|
||||
|
||||
diff@3.3.1:
|
||||
version "3.3.1"
|
||||
resolved "https://registry.yarnpkg.com/diff/-/diff-3.3.1.tgz#aa8567a6eed03c531fc89d3f711cd0e5259dec75"
|
||||
|
||||
diff@^3.2.0:
|
||||
version "3.5.0"
|
||||
resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12"
|
@@ -4026,12 +4007,6 @@ ee-first@1.1.1:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"

-"egal@>= 1.3.0 < 2":
-  version "1.3.0"
-  resolved "https://registry.yarnpkg.com/egal/-/egal-1.3.0.tgz#20a19cfa80ce9733f8413635d0042641f429187b"
-  dependencies:
-    kindof ">= 2.0.0 < 3"
-
 ejs@^2.5.6:
   version "2.5.8"
   resolved "https://registry.yarnpkg.com/ejs/-/ejs-2.5.8.tgz#2ab6954619f225e6193b7ac5f7c39c48fefe4380"
@@ -4201,7 +4176,7 @@ escape-html@~1.0.3:
   version "1.0.3"
   resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"

-escape-string-regexp@1.0.5, escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5:
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5:
   version "1.0.5"
   resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"

@@ -4448,17 +4423,6 @@ execa@^0.10.0:
     signal-exit "^3.0.0"
     strip-eof "^1.0.0"

-execa@^0.4.0:
-  version "0.4.0"
-  resolved "https://registry.yarnpkg.com/execa/-/execa-0.4.0.tgz#4eb6467a36a095fabb2970ff9d5e3fb7bce6ebc3"
-  dependencies:
-    cross-spawn-async "^2.1.1"
-    is-stream "^1.1.0"
-    npm-run-path "^1.0.0"
-    object-assign "^4.0.1"
-    path-key "^1.0.0"
-    strip-eof "^1.0.0"
-
 execa@^0.7.0:
   version "0.7.0"
   resolved "https://registry.yarnpkg.com/execa/-/execa-0.7.0.tgz#944becd34cc41ee32a63a9faf27ad5a65fc59777"
@@ -4950,12 +4914,6 @@ fs-mkdirp-stream@^1.0.0:
     graceful-fs "^4.1.11"
     through2 "^2.0.3"

-fs-promise@^0.4.1:
-  version "0.4.1"
-  resolved "https://registry.yarnpkg.com/fs-promise/-/fs-promise-0.4.1.tgz#9d57aed89dbcea0fdb6d4cb9c2044aedd9722efd"
-  dependencies:
-    any-promise "^1.0.0"
-
 fs-promise@^2.0.0, fs-promise@^2.0.3:
   version "2.0.3"
   resolved "https://registry.yarnpkg.com/fs-promise/-/fs-promise-2.0.3.tgz#f64e4f854bcf689aa8bddcba268916db3db46854"
@@ -5070,13 +5028,6 @@ get-stream@3.0.0, get-stream@^3.0.0:
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14"

-get-stream@^2.1.0:
-  version "2.3.1"
-  resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-2.3.1.tgz#5f38f93f346009666ee0150a054167f91bdd95de"
-  dependencies:
-    object-assign "^4.0.1"
-    pinkie-promise "^2.0.0"
-
 get-uri@^2.0.0:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/get-uri/-/get-uri-2.0.1.tgz#dbdcacacd8c608a38316869368117697a1631c59"
@@ -5150,17 +5101,6 @@ glob-watcher@^5.0.0:
     just-debounce "^1.0.0"
     object.defaults "^1.1.0"

-glob@7.1.2, glob@^7.0.0, glob@^7.0.3, glob@^7.0.5, glob@^7.1.0, glob@^7.1.1, glob@^7.1.2, glob@~7.1.1:
-  version "7.1.2"
-  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15"
-  dependencies:
-    fs.realpath "^1.0.0"
-    inflight "^1.0.4"
-    inherits "2"
-    minimatch "^3.0.4"
-    once "^1.3.0"
-    path-is-absolute "^1.0.0"
-
 glob@^6.0.1, glob@^6.0.4:
   version "6.0.4"
   resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
@@ -5171,6 +5111,17 @@ glob@^6.0.1, glob@^6.0.4:
     once "^1.3.0"
     path-is-absolute "^1.0.0"

+glob@^7.0.0, glob@^7.0.3, glob@^7.0.5, glob@^7.1.0, glob@^7.1.1, glob@^7.1.2, glob@~7.1.1:
+  version "7.1.2"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.2.tgz#c19c9df9a028702d678612384a6552404c636d15"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.4"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
 global-modules@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-1.0.0.tgz#6d770f0eb523ac78164d72b5e71a8877265cc3ea"
@@ -5270,10 +5221,6 @@ graceful-fs@~3.0.2:
   dependencies:
     natives "^1.1.0"

-growl@1.10.3:
-  version "1.10.3"
-  resolved "https://registry.yarnpkg.com/growl/-/growl-1.10.3.tgz#1926ba90cf3edfe2adb4927f5880bc22c66c790f"
-
 growly@^1.3.0:
   version "1.3.0"
   resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081"
@@ -5508,10 +5455,6 @@ has-flag@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-1.0.0.tgz#9d9e793165ce017a00f00418c43f942a7b1d11fa"

-has-flag@^2.0.0:
-  version "2.0.0"
-  resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-2.0.0.tgz#e8207af1cc7b30d446cc70b734b5e8be18f88d51"
-
 has-flag@^3.0.0:
   version "3.0.0"
   resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
@@ -5623,7 +5566,7 @@ hawk@~6.0.2:
     hoek "4.x.x"
     sntp "2.x.x"

-he@1.1.1, he@1.1.x:
+he@1.1.x:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd"

@@ -6904,7 +6847,7 @@ json-stable-stringify@~0.0.0:
   dependencies:
     jsonify "~0.0.0"

-"json-stringify-safe@>= 5 < 6", json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1:
+json-stringify-safe@^5.0.1, json-stringify-safe@~5.0.1:
   version "5.0.1"
   resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb"

@@ -7046,7 +6989,7 @@ kind-of@^6.0.0, kind-of@^6.0.2:
   version "6.0.2"
   resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051"

-"kindof@>= 2.0.0 < 3", kindof@^2.0.0:
+kindof@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/kindof/-/kindof-2.0.0.tgz#c335baf603a77cc37f8b406b73b6463fdbdf1abe"

@@ -7368,12 +7311,6 @@ lodash._bindcallback@^3.0.0:
   version "3.0.1"
   resolved "https://registry.yarnpkg.com/lodash._bindcallback/-/lodash._bindcallback-3.0.1.tgz#e531c27644cf8b57a99e17ed95b35c748789392e"

-lodash._createwrapper@^3.0.0:
-  version "3.2.0"
-  resolved "https://registry.yarnpkg.com/lodash._createwrapper/-/lodash._createwrapper-3.2.0.tgz#df453e664163217b895a454065af1c47a0ea3c4d"
-  dependencies:
-    lodash._root "^3.0.0"
-
 lodash._getnative@^3.0.0:
   version "3.9.1"
   resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5"
@@ -7510,12 +7447,6 @@ lodash.uniq@^4.5.0:
   version "4.5.0"
   resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"

-"lodash.wrap@>= 3 < 4":
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/lodash.wrap/-/lodash.wrap-3.0.1.tgz#3fcd8bef867b2ec8c21bac638d816180593f71ab"
-  dependencies:
-    lodash._createwrapper "^3.0.0"
-
 lodash@^4.0.0, lodash@^4.13.1, lodash@^4.14.0, lodash@^4.15.0, lodash@^4.16.0, lodash@^4.16.2, lodash@^4.16.6, lodash@^4.17.2, lodash@^4.17.4, lodash@^4.17.5, lodash@^4.2.0, lodash@^4.2.1, lodash@^4.3.0, lodash@^4.6.1, lodash@^4.8.0, lodash@~4.17.4:
   version "4.17.5"
   resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.5.tgz#99a92d65c0272debe8c96b6057bc8fbfa3bed511"
@@ -7567,7 +7498,7 @@ lru-cache@^2.6.5:
   version "2.7.3"
   resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-2.7.3.tgz#6d4524e8b955f95d4f5b58851ce21dd72fb4e952"

-lru-cache@^4.0.0, lru-cache@^4.0.1:
+lru-cache@^4.0.1:
   version "4.1.2"
   resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-4.1.2.tgz#45234b2e6e2f2b33da125624c4664929a0224c3f"
   dependencies:
@@ -7844,27 +7775,12 @@ mixin-deep@^1.2.0:
     for-in "^1.0.2"
     is-extendable "^1.0.1"

-mkdirp@0.5, mkdirp@0.5.1, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.1:
+mkdirp@0.5, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.1:
   version "0.5.1"
   resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903"
   dependencies:
     minimist "0.0.8"

-mocha@^4.0.1:
-  version "4.1.0"
-  resolved "https://registry.yarnpkg.com/mocha/-/mocha-4.1.0.tgz#7d86cfbcf35cb829e2754c32e17355ec05338794"
-  dependencies:
-    browser-stdout "1.3.0"
-    commander "2.11.0"
-    debug "3.1.0"
-    diff "3.3.1"
-    escape-string-regexp "1.0.5"
-    glob "7.1.2"
-    growl "1.10.3"
-    he "1.1.1"
-    mkdirp "0.5.1"
-    supports-color "4.4.0"
-
 modular-css-core@^8.1.0:
   version "8.1.0"
   resolved "https://registry.yarnpkg.com/modular-css-core/-/modular-css-core-8.1.0.tgz#4a920ba3d762cbd53a911a4ec2b8f40c90f8e8be"
@@ -7963,16 +7879,6 @@ multipipe@^0.1.2:
   dependencies:
     duplexer2 "0.0.2"

-must@^0.13.2:
-  version "0.13.4"
-  resolved "https://registry.yarnpkg.com/must/-/must-0.13.4.tgz#9d1389f458e894b2aa04032bfeb7a40714f78171"
-  dependencies:
-    egal ">= 1.3.0 < 2"
-    json-stringify-safe ">= 5 < 6"
-    kindof ">= 2.0.0 < 3"
-    lodash.wrap ">= 3 < 4"
-    oolong ">= 1.11.0 < 2"
-
 mute-stdout@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/mute-stdout/-/mute-stdout-1.0.0.tgz#5b32ea07eb43c9ded6130434cf926f46b2a7fd4d"
@@ -8307,12 +8213,6 @@ now-and-later@^2.0.0:
   dependencies:
     once "^1.3.2"

-npm-run-path@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-1.0.0.tgz#f5c32bf595fe81ae927daec52e82f8b000ac3c8f"
-  dependencies:
-    path-key "^1.0.0"
-
 npm-run-path@^2.0.0:
   version "2.0.2"
   resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f"
@@ -8492,10 +8392,6 @@ onetime@^2.0.0:
   dependencies:
     mimic-fn "^1.0.0"

-"oolong@>= 1.11.0 < 2":
-  version "1.15.1"
-  resolved "https://registry.yarnpkg.com/oolong/-/oolong-1.15.1.tgz#90bac9e7ce52f60906ab2228d9e2b115f1086cea"
-
 opencollective@^1.0.3:
   version "1.0.3"
   resolved "https://registry.yarnpkg.com/opencollective/-/opencollective-1.0.3.tgz#aee6372bc28144583690c3ca8daecfc120dd0ef1"
@@ -8830,10 +8726,6 @@ path-is-inside@^1.0.1, path-is-inside@^1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53"

-path-key@^1.0.0:
-  version "1.0.0"
-  resolved "https://registry.yarnpkg.com/path-key/-/path-key-1.0.0.tgz#5d53d578019646c0d68800db4e146e6bdc2ac7af"
-
 path-key@^2.0.0, path-key@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40"
@@ -9110,12 +9002,6 @@ promise-polyfill@^6.0.1:
   version "6.1.0"
   resolved "https://registry.yarnpkg.com/promise-polyfill/-/promise-polyfill-6.1.0.tgz#dfa96943ea9c121fca4de9b5868cb39d3472e057"

-promise-toolbox@^0.5.0:
-  version "0.5.1"
-  resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.5.1.tgz#23977649a1c501e39b3352b047718ed9fe54e194"
-  dependencies:
-    make-error "^1.0.4"
-
 promise-toolbox@^0.8.0:
   version "0.8.3"
   resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.8.3.tgz#b757232a21d246d8702df50da6784932dd0f5348"
@@ -10170,12 +10056,18 @@ resolve@1.1.7:
   version "1.1.7"
   resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b"

-resolve@^1.1.4, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.3.2, resolve@^1.3.3, resolve@^1.4.0, resolve@^1.5.0:
+resolve@^1.1.4, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.3.3, resolve@^1.4.0, resolve@^1.5.0:
   version "1.7.0"
   resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.7.0.tgz#2bdf5374811207285df0df652b78f118ab8f3c5e"
   dependencies:
     path-parse "^1.0.5"

+resolve@^1.3.2:
+  version "1.7.1"
+  resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.7.1.tgz#aadd656374fd298aee895bc026b8297418677fd3"
+  dependencies:
+    path-parse "^1.0.5"
+
 responselike@1.0.2:
   version "1.0.2"
   resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7"
@@ -10939,12 +10831,6 @@ superagent@^3.8.2:
     qs "^6.5.1"
     readable-stream "^2.0.5"

-supports-color@4.4.0:
-  version "4.4.0"
-  resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-4.4.0.tgz#883f7ddabc165142b2a61427f3352ded195d1a3e"
-  dependencies:
-    has-flag "^2.0.0"
-
 supports-color@^0.2.0:
   version "0.2.0"
   resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-0.2.0.tgz#d92de2694eb3f67323973d7ae3d8b55b4c22190a"
@@ -11740,7 +11626,7 @@ which-pm-runs@^1.0.0:
   version "1.0.0"
   resolved "https://registry.yarnpkg.com/which-pm-runs/-/which-pm-runs-1.0.0.tgz#670b3afbc552e0b55df6b7780ca74615f23ad1cb"

-which@1, which@^1.2.12, which@^1.2.14, which@^1.2.8, which@^1.2.9, which@^1.3.0:
+which@1, which@^1.2.12, which@^1.2.14, which@^1.2.9, which@^1.3.0:
   version "1.3.0"
   resolved "https://registry.yarnpkg.com/which/-/which-1.3.0.tgz#ff04bdfc010ee547d780bec38e1ac1c2777d253a"
   dependencies:
@@ -12011,6 +11897,12 @@ yargs@~3.10.0:
     decamelize "^1.0.0"
     window-size "0.1.0"

+yazl@^2.4.3:
+  version "2.4.3"
+  resolved "https://registry.yarnpkg.com/yazl/-/yazl-2.4.3.tgz#ec26e5cc87d5601b9df8432dbdd3cd2e5173a071"
+  dependencies:
+    buffer-crc32 "~0.2.3"
+
 zip-stream@^1.2.0:
   version "1.2.0"
   resolved "https://registry.yarnpkg.com/zip-stream/-/zip-stream-1.2.0.tgz#a8bc45f4c1b49699c6b90198baacaacdbcd4ba04"