feat(xo-web,xo-server): ability to import a VHD/VMDK disk (#4138)

This commit is contained in:
Nicolas Raynaud
2019-11-28 11:35:31 +01:00
committed by Julien Fontanet
parent 14f0cbaec6
commit d69e61a634
23 changed files with 930 additions and 375 deletions

View File

@@ -10,6 +10,7 @@
- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
- [SAML] Setting to disable requested authentication context (helps with _Active Directory_) (PR [#4675](https://github.com/vatesfr/xen-orchestra/pull/4675))
- The default sign-in page can be configured via `authentication.defaultSignInPage` (PR [#4678](https://github.com/vatesfr/xen-orchestra/pull/4678))
- [SR] Allow import of VHD and VMDK disks [#4137](https://github.com/vatesfr/xen-orchestra/issues/4137) (PR [#4138](https://github.com/vatesfr/xen-orchestra/pull/4138))
### Bug fixes
@@ -26,6 +27,8 @@
>
> Rule of thumb: add packages on top.
- vhd-lib v0.7.2
- xo-vmdk-to-vhd v0.1.8
- xo-server-auth-saml v0.7.0
- xo-server-backup-reports v0.16.4
- @xen-orchestra/fs v0.10.2

View File

@@ -28,6 +28,7 @@
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"lodash": "^4.17.4",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},

View File

@@ -29,13 +29,13 @@ export default asyncIteratorToStream(async function*(size, blockParser) {
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.offsetBytes - position
const paddingLength = next.logicalAddressBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield* filePadding(paddingLength)
yield next.data
position = next.offsetBytes + next.data.length
position = next.logicalAddressBytes + next.data.length
}
yield* filePadding(actualSize - position)
yield footer

View File

@@ -1,5 +1,6 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forEachRight } from 'lodash'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
@@ -17,38 +18,65 @@ import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
* Looks once backwards to collect the last fragment of each VHD block (they could be interleaved),
* then allocates the blocks in a forwards pass.
* @returns currentVhdPositionSector the first free sector after the data
*/
function createBAT(
firstBlockPosition,
blockAddressList,
fragmentLogicAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
const lastFragmentPerBlock = new Map()
forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(
fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
)
if (!lastFragmentPerBlock.has(vhdTableIndex)) {
lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
}
})
return currentVhdPositionSector
const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
// lastFragmentPerBlock is from last to first, so we go the other way around
forEachRight(
lastFragmentPerBlockArray,
([vhdTableIndex, _fragmentVirtualAddress]) => {
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
}
)
return [currentVhdPositionSector, lastFragmentPerBlock]
}
/**
* Receives an iterator of constant sized fragments, and a list of their address in virtual space, and returns
* a stream representing the VHD file of this disk.
* The fragment size should be an integer divider of the VHD block size.
* "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
* @param diskSize
* @param fragmentSize
* @param fragmentLogicalAddressList
* @param fragmentIterator
* @returns {Promise<Function>}
*/
export default async function createReadableStream(
diskSize,
incomingBlockSize,
blockAddressList,
blockIterator
fragmentSize,
fragmentLogicalAddressList,
fragmentIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
`Can't import file, grain size (${fragmentSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
@@ -80,60 +108,72 @@ export default async function createReadableStream(
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
const endOfData = createBAT(
const [endOfData, lastFragmentPerBlock] = createBAT(
firstBlockPosition,
blockAddressList,
fragmentLogicalAddressList,
ratio,
bat,
bitmapSize
)
const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
let position = 0
function* yieldAndTrack(buffer, expectedPosition) {
function* yieldAndTrack(buffer, expectedPosition, reason) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition)
assert.strictEqual(position, expectedPosition, reason)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
async function* generateFileContent(blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield* yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
currentVhdBlockIndex = batIndex
}
const blockOffset =
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + (next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
)
function insertFragmentInBlock(fragment, blockWithBitmap) {
const fragmentOffsetInBlock =
(fragment.logicalAddressBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(blockWithBitmap, fragmentOffsetInBlock + bitPos)
}
fragment.data.copy(
blockWithBitmap,
bitmapSize + (fragment.logicalAddressBytes % VHD_BLOCK_SIZE_BYTES)
)
}
async function* generateBlocks(fragmentIterator, bitmapSize) {
let currentFragmentIndex = -1
// store blocks waiting for some of their fragments.
const batIndexToBlockMap = new Map()
for await (const fragment of fragmentIterator) {
currentFragmentIndex++
const batIndex = Math.floor(
fragment.logicalAddressBytes / VHD_BLOCK_SIZE_BYTES
)
let currentBlockWithBitmap = batIndexToBlockMap.get(batIndex)
if (currentBlockWithBitmap === undefined) {
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
batIndexToBlockMap.set(batIndex, currentBlockWithBitmap)
}
insertFragmentInBlock(fragment, currentBlockWithBitmap)
const batEntry = bat.readUInt32BE(batIndex * 4)
assert.notStrictEqual(batEntry, BLOCK_UNUSED)
const batPosition = batEntry * SECTOR_SIZE
if (lastFragmentPerBlock.get(batIndex) === fragment.logicalAddressBytes) {
batIndexToBlockMap.delete(batIndex)
yield* yieldAndTrack(
currentBlockWithBitmap,
batPosition,
`VHD block start index: ${currentFragmentIndex}`
)
}
}
yield* yieldAndTrack(currentBlockWithBitmap)
}
async function* iterator() {
yield* yieldAndTrack(footer, 0)
yield* yieldAndTrack(header, FOOTER_SIZE)
yield* yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield* generateFileContent(blockIterator, bitmapSize, ratio)
yield* generateBlocks(fragmentIterator, bitmapSize)
yield* yieldAndTrack(footer)
}

View File

@@ -10,3 +10,4 @@ export { default as createReadableSparseStream } from './createReadableSparseStr
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'

View File

@@ -0,0 +1,10 @@
import readChunk from './_readChunk'
import { FOOTER_SIZE } from './_constants'
import { fuFooter } from './_structs'
/**
 * Reads the VHD footer found at the head of `stream` without consuming it:
 * the bytes are pushed back onto the stream so a downstream consumer still
 * sees the complete VHD.
 * @param {stream.Readable} stream - a stream positioned at the start of a VHD
 * @returns {Promise<Object>} the unpacked footer structure
 */
export default async function peekFooterFromStream(stream) {
  const rawFooter = await readChunk(stream, FOOTER_SIZE)
  // give the bytes back so the stream is still a full, valid VHD
  stream.unshift(rawFooter)
  return fuFooter.unpack(rawFooter)
}

View File

@@ -31,11 +31,11 @@ test('createFooter() does not crash', () => {
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
offsetBytes: 100,
logicalAddressBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 700,
logicalAddressBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -62,11 +62,11 @@ test('ReadableRawVHDStream does not crash', async () => {
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
offsetBytes: 700,
logicalAddressBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 100,
logicalAddressBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -97,11 +97,11 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
offsetBytes: blockSize * 3,
logicalAddressBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
offsetBytes: blockSize * 100,
logicalAddressBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
@@ -109,7 +109,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const stream = await createReadableSparseStream(
fileSize,
blockSize,
blocks.map(b => b.offsetBytes),
blocks.map(b => b.logicalAddressBytes),
blocks
)
expect(stream.length).toEqual(4197888)
@@ -128,7 +128,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)
b.data.copy(expected, b.logicalAddressBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

View File

@@ -1,9 +1,12 @@
import createLogger from '@xen-orchestra/log'
import pump from 'pump'
import convertVmdkToVhdStream from 'xo-vmdk-to-vhd'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors'
import { peekFooterFromVhdStream } from 'vhd-lib'
import { parseSize } from '../utils'
import { VDI_FORMAT_VHD } from '../xapi'
const log = createLogger('xo:disk')
@@ -165,3 +168,97 @@ resize.params = {
resize.resolve = {
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate'],
}
// HTTP handler for the actual disk upload.
// - type 'vmdk': converts the request stream to a VHD stream on the fly,
//   using the grain tables previously extracted client-side (vmdkData)
// - type 'vhd': peeks the footer from the stream to learn the virtual size
// A VDI of the right size is then created on the target SR and filled with
// the VHD stream; on import failure the VDI is deleted before re-throwing.
// Responds with a JSON-RPC payload containing the new VDI id, or a JSON-RPC
// error with HTTP status 500 on any failure.
async function handleImport(
req,
res,
{ type, name, description, vmdkData, srId, xapi }
) {
req.setTimeout(43200000) // 12 hours
try {
// NOTE(review): header values are strings — confirm that consumers of
// `req.length` tolerate a non-number here
req.length = req.headers['content-length']
let vhdStream, size
if (type === 'vmdk') {
vhdStream = await convertVmdkToVhdStream(
req,
vmdkData.grainLogicalAddressList,
vmdkData.grainFileOffsetList
)
size = vmdkData.capacity
} else if (type === 'vhd') {
vhdStream = req
// reads the footer then pushes the bytes back, so the stream stays whole
const footer = await peekFooterFromVhdStream(req)
size = footer.currentSize
} else {
throw new Error(
`Unknown disk type, expected "vhd" or "vmdk", got ${type}`
)
}
const vdi = await xapi.createVdi({
name_description: description,
name_label: name,
size,
sr: srId,
})
try {
await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
res.end(format.response(0, vdi.$id))
} catch (e) {
// don't leave a half-imported VDI behind
await xapi.deleteVdi(vdi)
throw e
}
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
}
}
// API method: registers a one-shot HTTP endpoint that the client will upload
// the disk image to; `type` is 'vhd' or 'vmdk'.
async function importDisk({ sr, type, name, description, vmdkData }) {
  const handlerParams = {
    description,
    name,
    srId: sr._xapiId,
    type,
    vmdkData,
    xapi: this.getXapi(sr),
  }
  const $sendTo = await this.registerHttpRequest(handleImport, handlerParams)
  return { $sendTo }
}
export { importDisk as import }
// Parameter schema for the `disk.import` API call.
// `vmdkData` is only meaningful for type 'vmdk': it carries the grain tables
// extracted client-side (in the browser) so the server can build the VHD
// Block Allocation Table before streaming.
importDisk.params = {
description: { type: 'string', optional: true },
name: { type: 'string' },
sr: { type: 'string' },
type: { type: 'string' },
vmdkData: {
type: 'object',
optional: true,
properties: {
capacity: { type: 'integer' },
grainLogicalAddressList: {
description:
'virtual address of the blocks on the disk (LBA), in order encountered in the VMDK',
type: 'array',
items: {
type: 'integer',
},
},
grainFileOffsetList: {
description:
'offset of the grains in the VMDK file, in order encountered in the VMDK',
optional: true,
type: 'array',
items: {
type: 'integer',
},
},
},
},
}
// the caller needs `administrate` permission on the target SR
importDisk.resolve = {
sr: ['sr', 'SR', 'administrate'],
}

View File

@@ -1381,7 +1381,11 @@ export default class Xapi extends XapiBase {
}
const table = tables[entry.name]
const vhdStream = await vmdkToVhd(stream, table)
const vhdStream = await vmdkToVhd(
stream,
table.grainLogicalAddressList,
table.grainFileOffsetList
)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
// See: https://github.com/mafintosh/tar-stream#extracting

View File

@@ -1,5 +1,4 @@
# Some notes about the conversion
---
## File formats
VMDK and VHD file format share the same high level principles:
@@ -16,15 +15,22 @@ chunks.
[The VHD specification](http://download.microsoft.com/download/f/f/e/ffef50a5-07dd-4cf8-aaa3-442c0673a029/Virtual%20Hard%20Disk%20Format%20Spec_10_18_06.doc)
## A primer on VMDK
A VMDK file might contain more than one logical disk inside (sparse extents); an ASCII header describes those disks.
## StreamOptimized VMDK
Each sparse extent contains "grains", whose address is designated into a "grain table". Said table is itself indexed by a "directory".
The grain table is not sparse, so the directory is useless (historical artifact).
### StreamOptimized VMDK
The streamOptimized VMDK file format was designed so that from a file on
disk an application can generate a VMDK file going forwards without ever
needing to seek() backwards. The idea is to:
needing to seek() backwards. The difference is that header, tables, directory, grains etc. are delimited by "markers"
and the table and directory are pushed at the end of the file and the grains are compressed.
The generation algorithm is:
- generate a header without a
directory address in it (-1),
- dump all the compressed chunks in the stream while generating the
- dump all the compressed grains in the stream while generating the
directory in memory
- dump the directory marker
- dump the directory and record its position
@@ -66,7 +72,7 @@ When scouring the internet for test files, we stumbled on [a strange OVA file](h
The VMDK contained in the OVA (which is a tar of various files), had a
few oddities:
- it declared having markers in it's header, but there were no marker
- it declared having markers in its header, but there were no marker
for its primary and secondary directory, nor for its footer
- its directories are at the top, and declared in the header.
- it declared being streamOptimized
@@ -91,3 +97,42 @@ one application an other.
The VHD stream doesn't declare its length, because that breaks the
downstream computation in xo-server, but with a fixed VHD file format,
we can pre-compute the exact file length and advertise it.
# The conversion from VMDK to VHD
In the browser we extract the grain table, that is a list of the file offsets of all the grains and a list of the
logical addresses of all the grains (both lists are in increasing file-offset order with matching indexes; we use two lists
for bandwidth reasons). Those lists are sent to the server, where the VHD Block Allocation Table will be generated.
With the default parameters, there are 32 VMDK grains into a VHD block, so a late scheduling is used to create the BAT.
Once the BAT is generated, the VHD file is created on the fly block by block and sent on the socket towards the XAPI url.
## How VHD Block order and position is decided from the VMDK table
Let's use letters to represent VHD blocks, and numbers to represent their smaller VMDK constituents, with a ratio of 3 VMDK
fragments per VHD block.
`A` is the first VHD block, `A2` is the second VMDK fragment of the first VHD block.
In the VMDK file, fragments could be in any order and VHD blocks might not even be complete: `A3 E3 C1 C2 C3 A1 A2`.
We are trying to generate a VHD file while using the minimum intermediate memory possible.
When generating the VHD file Block Allocation Table we are setting in stone the order in which the block will be sent in
the VHD stream. Since we can't seek backwards in the VHD stream, we can't write a VHD block until all its VMDK fragments
have been read, so the last fragment encountered will dictate the order of the VHD Block in the file.
Let's review our previous example: `A3 E3 C1 C2 C3 A1 A2`, the block `B` doesn't appear, the block `A` has its fragment
interleaved with other blocks. So to decide the order of the blocks in the VHD file, we just go backwards and the last
time we see a block we can write it, the result of this backward collection is `A C E`:
- `A2` seen, collect `A`
- `A1` seen, skip because we already have A
- `C3` seen, collect `C`
- `C2` seen, skip
- `C1` seen, skip
- `E3` seen, collect `E`
- `A3` seen, skip (but we can infer how long we'll need to keep this fragment in memory).
We can now reverse our collection to `E C A`, and attribute addresses to the blocks, we could not do it before, because
we didn't know that `B` didn't exist or that `E` would be the first one.
When reading the VMDK file, we know that when we encounter `A3` we will have to keep it in memory until we meet `A2`.
But when we meet `E3`, we know that we can dump `E` on the VHD stream and release the memory for `E`.

View File

@@ -0,0 +1,89 @@
export const compressionDeflate = 'COMPRESSION_DEFLATE'
export const compressionNone = 'COMPRESSION_NONE'
// index in this array == on-disk compression algorithm id
const compressionMap = [compressionNone, compressionDeflate]

/**
 * Decodes the 4-byte little-endian flags field of a VMDK header.
 * @param {Buffer} flagBuffer - buffer whose first 4 bytes are the flags
 * @returns {{newLineTest: boolean, useSecondaryGrain: boolean, useZeroedGrainTable: boolean, compressedGrains: boolean, hasMarkers: boolean}}
 */
export function parseFlags(flagBuffer) {
  const bits = flagBuffer.readUInt32LE(0)
  const isSet = position => (bits & (1 << position)) !== 0
  return {
    newLineTest: isSet(0),
    useSecondaryGrain: isSet(1),
    useZeroedGrainTable: isSet(2),
    compressedGrains: isSet(16),
    hasMarkers: isSet(17),
  }
}
// Reads a signed 64-bit little-endian field, accepting only values that fit
// in 47 bits of magnitude (JS numbers cannot represent the full s64 range
// exactly). The 2 high bytes must be a pure sign extension of the low part.
function parseS64b(buffer, offset, valueName) {
  const low = buffer.readIntLE(offset, 6)
  const high = buffer.readIntLE(offset + 6, 2)
  // anything other than 0 / -1 in the high bytes means real data lives there
  const overflow = high !== 0 && high !== -1
  // a sign disagreement between the halves also means the value is too large
  const signMismatch = Math.sign(low) * Math.sign(high) < 0
  if (overflow || signMismatch) {
    throw new Error('Unsupported VMDK, ' + valueName + ' is too big')
  }
  return low
}
/**
 * Reads an unsigned 64-bit little-endian field. Only the low 48 bits may be
 * set: a non-zero value in the 2 high bytes would not be exactly
 * representable as a JS number, so an error is thrown instead.
 * @param {Buffer} buffer
 * @param {number} offset
 * @param {string} valueName - identifier used in the error message
 * @returns {number}
 */
export function parseU64b(buffer, offset, valueName) {
  const high = buffer.readUIntLE(offset + 6, 2)
  if (high > 0) {
    throw new Error('Unsupported VMDK, ' + valueName + ' is too big')
  }
  return buffer.readUIntLE(offset, 6)
}
/**
 * Parses a VMDK sparse extent header from a buffer.
 * Validates the 'KDMV' magic number and the version (1 or 3 only).
 * All `*Sectors` fields are expressed in sectors, not bytes.
 * @param {Buffer} buffer - at least the first 80 bytes of the extent header
 * @returns {Object} the decoded header fields
 * @throws {Error} if the file is not a VMDK or uses an unsupported version
 */
export function parseHeader(buffer) {
  const magicString = buffer.slice(0, 4).toString('ascii')
  if (magicString !== 'KDMV') {
    throw new Error('not a VMDK file')
  }
  const version = buffer.readUInt32LE(4)
  if (version !== 1 && version !== 3) {
    throw new Error(
      'unsupported VMDK version ' +
        version +
        ', only version 1 and 3 are supported'
    )
  }
  // small local shorthands over the 48-bit readers
  const u48 = (offset, name) => parseU64b(buffer, offset, name)
  const s48 = (offset, name) => parseS64b(buffer, offset, name)
  const flags = parseFlags(buffer.slice(8, 12))
  const capacitySectors = u48(12, 'capacitySectors')
  const grainSizeSectors = u48(20, 'grainSizeSectors')
  const descriptorOffsetSectors = u48(28, 'descriptorOffsetSectors')
  const descriptorSizeSectors = u48(36, 'descriptorSizeSectors')
  const numGTEsPerGT = buffer.readUInt32LE(44)
  const rGrainDirectoryOffsetSectors = s48(48, 'rGrainDirectoryOffsetSectors')
  const grainDirectoryOffsetSectors = s48(56, 'grainDirectoryOffsetSectors')
  const overheadSectors = s48(64, 'overheadSectors')
  // compression algorithm id, mapped to a symbolic name
  const compressionMethod = compressionMap[buffer.readUInt16LE(77)]
  // how many sectors one full grain table covers
  const l1EntrySectors = numGTEsPerGT * grainSizeSectors
  return {
    magicString,
    version,
    flags,
    compressionMethod,
    grainSizeSectors,
    overheadSectors,
    capacitySectors,
    descriptorOffsetSectors,
    descriptorSizeSectors,
    grainDirectoryOffsetSectors,
    rGrainDirectoryOffsetSectors,
    l1EntrySectors,
    numGTEsPerGT,
  }
}

View File

@@ -1,17 +1,27 @@
import { createReadableSparseStream } from 'vhd-lib'
import VMDKDirectParser from './vmdk-read'
import readVmdkGrainTable from './vmdk-read-table'
export {
default as readVmdkGrainTable,
readCapacityAndGrainTable,
} from './vmdk-read-table'
async function convertFromVMDK(vmdkReadStream, table) {
const parser = new VMDKDirectParser(vmdkReadStream)
async function convertFromVMDK(
vmdkReadStream,
grainLogicalAddressList,
grainFileOffsetList
) {
const parser = new VMDKDirectParser(
vmdkReadStream,
grainLogicalAddressList,
grainFileOffsetList
)
const header = await parser.readHeader()
return createReadableSparseStream(
header.capacitySectors * 512,
header.grainSizeSectors * 512,
table,
grainLogicalAddressList,
parser.blockIterator()
)
}
export { convertFromVMDK as default, readVmdkGrainTable }
export { convertFromVMDK as default }

View File

@@ -5,39 +5,53 @@ const DISK_CAPACITY_OFFSET = 12
const GRAIN_SIZE_OFFSET = 20
const NUM_GTE_PER_GT_OFFSET = 44
const GRAIN_ADDRESS_OFFSET = 56
const MANTISSA_BITS_IN_DOUBLE = 53
const getLongLong = (buffer, offset, name) => {
if (buffer.byteLength < offset + 8) {
throw new Error(
`buffer ${name} is too short, expecting ${offset + 8} minimum, got ${
buffer.byteLength
}`
)
}
const dataView = new DataView(buffer)
const highBits = dataView.getUint32(offset + 4, true)
if (highBits >= Math.pow(2, MANTISSA_BITS_IN_DOUBLE - 32)) {
throw new Error(
'Unsupported file, high order bits are too high in field ' + name
)
}
const res = dataView.getUint32(offset, true)
return res + highBits * Math.pow(2, 32)
}
/**
* the grain table is an object { grainLogicalAddressList: [number], grainFileOffsetList: [number] }
* grainLogicalAddressList contains the logical addresses of the grains in the file, in the order they are stored in the VMDK
* grainFileOffsetList contains the offsets of the grains in the VMDK file, in the order they are stored in the VMDK (so this array should be ascending)
*
* the grain table is the array of LBAs (in bytes, not in sectors) ordered by their position in the VMDK file
* THIS CODE RUNS ON THE BROWSER
*/
export default async function readVmdkGrainTable(fileAccessor) {
const getLongLong = (buffer, offset, name) => {
if (buffer.byteLength < offset + 8) {
throw new Error(
`buffer ${name} is too short, expecting ${offset + 8} minimum, got ${
buffer.byteLength
}`
)
}
const dataView = new DataView(buffer)
const res = dataView.getUint32(offset, true)
const highBits = dataView.getUint32(offset + 4, true)
const MANTISSA_BITS_IN_DOUBLE = 53
if (highBits >= Math.pow(2, MANTISSA_BITS_IN_DOUBLE - 32)) {
throw new Error(
'Unsupported file, high order bits are to high in field ' + name
)
}
return res + highBits * Math.pow(2, 32)
}
return (await readCapacityAndGrainTable(fileAccessor)).tablePromise
}
/***
*
* @param fileAccessor: (start, end) => ArrayBuffer
* @returns {Promise<{capacityBytes: number, tablePromise: Promise<{ grainLogicalAddressList: [number], grainFileOffsetList: [number] }>}>}
*/
export async function readCapacityAndGrainTable(fileAccessor) {
let headerBuffer = await fileAccessor(0, HEADER_SIZE)
let grainAddrBuffer = headerBuffer.slice(
GRAIN_ADDRESS_OFFSET,
GRAIN_ADDRESS_OFFSET + 8
)
if (
new Int8Array(grainAddrBuffer).reduce((acc, val) => acc && val === -1, true)
) {
if (new Int8Array(grainAddrBuffer).every(val => val === -1)) {
headerBuffer = await fileAccessor(
FOOTER_POSITION,
FOOTER_POSITION + HEADER_SIZE
@@ -47,51 +61,64 @@ export default async function readVmdkGrainTable(fileAccessor) {
GRAIN_ADDRESS_OFFSET + 8
)
}
const grainDirPosBytes =
getLongLong(grainAddrBuffer, 0, 'grain directory address') * SECTOR_SIZE
const capacity =
getLongLong(headerBuffer, DISK_CAPACITY_OFFSET, 'capacity') * SECTOR_SIZE
const grainSize =
getLongLong(headerBuffer, GRAIN_SIZE_OFFSET, 'grain size') * SECTOR_SIZE
const grainCount = Math.ceil(capacity / grainSize)
const numGTEsPerGT = new DataView(headerBuffer).getUint32(
NUM_GTE_PER_GT_OFFSET,
true
)
const grainTablePhysicalSize = numGTEsPerGT * 4
const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT)
const grainDirectoryPhysicalSize = grainDirectoryEntries * 4
const grainDir = new Uint32Array(
await fileAccessor(
grainDirPosBytes,
grainDirPosBytes + grainDirectoryPhysicalSize
async function readTable() {
const grainSizeByte =
getLongLong(headerBuffer, GRAIN_SIZE_OFFSET, 'grain size') * SECTOR_SIZE
const grainCount = Math.ceil(capacity / grainSizeByte)
const numGTEsPerGT = new DataView(headerBuffer).getUint32(
NUM_GTE_PER_GT_OFFSET,
true
)
)
const cachedGrainTables = []
for (let i = 0; i < grainDirectoryEntries; i++) {
const grainTableAddr = grainDir[i] * SECTOR_SIZE
if (grainTableAddr !== 0) {
cachedGrainTables[i] = new Uint32Array(
await fileAccessor(
grainTableAddr,
grainTableAddr + grainTablePhysicalSize
)
const grainTablePhysicalSize = numGTEsPerGT * 4
const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT)
const grainDirectoryPhysicalSize = grainDirectoryEntries * 4
const grainDir = new Uint32Array(
await fileAccessor(
grainDirPosBytes,
grainDirPosBytes + grainDirectoryPhysicalSize
)
}
}
const extractedGrainTable = []
for (let i = 0; i < grainCount; i++) {
const directoryEntry = Math.floor(i / numGTEsPerGT)
const grainTable = cachedGrainTables[directoryEntry]
if (grainTable !== undefined) {
const grainAddr = grainTable[i % numGTEsPerGT]
if (grainAddr !== 0) {
extractedGrainTable.push([i, grainAddr])
)
const cachedGrainTables = []
for (let i = 0; i < grainDirectoryEntries; i++) {
const grainTableAddr = grainDir[i] * SECTOR_SIZE
if (grainTableAddr !== 0) {
cachedGrainTables[i] = new Uint32Array(
await fileAccessor(
grainTableAddr,
grainTableAddr + grainTablePhysicalSize
)
)
}
}
const extractedGrainTable = []
for (let i = 0; i < grainCount; i++) {
const directoryEntry = Math.floor(i / numGTEsPerGT)
const grainTable = cachedGrainTables[directoryEntry]
if (grainTable !== undefined) {
const grainAddr = grainTable[i % numGTEsPerGT]
if (grainAddr !== 0) {
extractedGrainTable.push([i, grainAddr])
}
}
}
extractedGrainTable.sort(
([i1, grainAddress1], [_i2, grainAddress2]) =>
grainAddress1 - grainAddress2
)
const fragmentAddressList = extractedGrainTable.map(
([index, _grainAddress]) => index * grainSizeByte
)
const grainFileOffsetList = extractedGrainTable.map(
([_index, grainAddress]) => grainAddress * SECTOR_SIZE
)
return { grainLogicalAddressList: fragmentAddressList, grainFileOffsetList }
}
extractedGrainTable.sort(
([i1, grainAddress1], [i2, grainAddress2]) => grainAddress1 - grainAddress2
)
return extractedGrainTable.map(([index, grainAddress]) => index * grainSize)
return { tablePromise: readTable(), capacityBytes: capacity }
}

View File

@@ -1,12 +1,34 @@
/* eslint-env jest */
import { createReadStream } from 'fs-extra'
import { createReadStream, stat } from 'fs-extra'
import { exec } from 'child-process-promise'
import { pFromCallback } from 'promise-toolbox'
import rimraf from 'rimraf'
import tmp from 'tmp'
import VMDKDirectParser from './vmdk-read'
import getStream from 'get-stream'
import { readVmdkGrainTable } from './index'
// noinspection DuplicatedCode
function createFileAccessor(file) {
return async (start, end) => {
if (start < 0 || end < 0) {
const fileLength = (await stat(file)).size
start = start < 0 ? fileLength + start : start
end = end < 0 ? fileLength + end : end
}
const result = await getStream.buffer(
createReadStream(file, { start, end: end - 1 })
)
// crazy stuff to get a browser-compatible ArrayBuffer from a node buffer
// https://stackoverflow.com/a/31394257/72637
return result.buffer.slice(
result.byteOffset,
result.byteOffset + result.byteLength
)
}
}
jest.setTimeout(10000)
@@ -35,14 +57,21 @@ test('VMDKDirectParser reads OK', async () => {
' ' +
fileName
)
const parser = new VMDKDirectParser(createReadStream(fileName))
const data = await readVmdkGrainTable(createFileAccessor(fileName))
const parser = new VMDKDirectParser(
createReadStream(fileName),
data.grainLogicalAddressList,
data.grainFileOffsetList
)
const header = await parser.readHeader()
const harvested = []
for await (const res of parser.blockIterator()) {
harvested.push(res)
}
expect(harvested.length).toEqual(2)
expect(harvested[0].offsetBytes).toEqual(0)
expect(harvested[0].logicalAddressBytes).toEqual(0)
expect(harvested[0].data.length).toEqual(header.grainSizeSectors * 512)
expect(harvested[1].offsetBytes).toEqual(header.grainSizeSectors * 512)
expect(harvested[1].logicalAddressBytes).toEqual(
header.grainSizeSectors * 512
)
})

View File

@@ -1,38 +1,15 @@
// see https://github.com/babel/babel/issues/8450
import 'core-js/features/symbol/async-iterator'
import assert from 'assert'
import zlib from 'zlib'
import { compressionDeflate, parseHeader, parseU64b } from './definitions'
import { VirtualBuffer } from './virtual-buffer'
const SECTOR_SIZE = 512
const HEADER_SIZE = 512
const VERSION_OFFSET = 4
const compressionDeflate = 'COMPRESSION_DEFLATE'
const compressionNone = 'COMPRESSION_NONE'
const compressionMap = [compressionNone, compressionDeflate]
function parseS64b(buffer, offset, valueName) {
const low = buffer.readInt32LE(offset)
const high = buffer.readInt32LE(offset + 4)
// here there might be a surprise because we are reading 64 integers into double floats (53 bits mantissa)
const value = low | (high << 32)
if ((value & (Math.pow(2, 32) - 1)) !== low) {
throw new Error('Unsupported VMDK, ' + valueName + ' is too big')
}
return value
}
function parseU64b(buffer, offset, valueName) {
const low = buffer.readUInt32LE(offset)
const high = buffer.readUInt32LE(offset + 4)
// here there might be a surprise because we are reading 64 integers into double floats (53 bits mantissa)
const value = low | (high << 32)
if ((value & (Math.pow(2, 32) - 1)) !== low) {
throw new Error('Unsupported VMDK, ' + valueName + ' is too big')
}
return value
}
function parseDescriptor(descriptorSlice) {
const descriptorText = descriptorSlice.toString('ascii').replace(/\x00+$/, '') // eslint-disable-line no-control-regex
@@ -60,74 +37,11 @@ function parseDescriptor(descriptorSlice) {
return { descriptor: descriptorDict, extents: extentList }
}
function parseFlags(flagBuffer) {
const number = flagBuffer.readUInt32LE(0)
return {
newLineTest: !!(number & (1 << 0)),
useSecondaryGrain: !!(number & (1 << 1)),
useZeroedGrainTable: !!(number & (1 << 2)),
compressedGrains: !!(number & (1 << 16)),
hasMarkers: !!(number & (1 << 17)),
}
}
/**
 * Parses a VMDK sparse-extent header.
 *
 * Field offsets follow the SparseExtentHeader layout; all *Sectors fields
 * are expressed in 512-byte sectors.
 *
 * @param {Buffer} buffer - buffer starting at the header magic
 * @returns {object} decoded header fields
 * @throws {Error} when the magic is wrong or the version is unsupported
 */
function parseHeader(buffer) {
  // a VMDK sparse extent starts with the magic bytes 'KDMV'
  const magic = buffer.slice(0, 4).toString('ascii')
  if (magic !== 'KDMV') {
    throw new Error('not a VMDK file')
  }
  const version = buffer.readUInt32LE(4)
  if (version !== 1 && version !== 3) {
    throw new Error(
      'unsupported VMDK version ' +
        version +
        ', only version 1 and 3 are supported'
    )
  }
  const flags = parseFlags(buffer.slice(8, 12))
  const capacitySectors = parseU64b(buffer, 12, 'capacitySectors')
  const grainSizeSectors = parseU64b(buffer, 20, 'grainSizeSectors')
  const descriptorOffsetSectors = parseU64b(
    buffer,
    28,
    'descriptorOffsetSectors'
  )
  const descriptorSizeSectors = parseU64b(buffer, 36, 'descriptorSizeSectors')
  const numGTEsPerGT = buffer.readUInt32LE(44)
  const rGrainDirectoryOffsetSectors = parseS64b(
    buffer,
    48,
    'rGrainDirectoryOffsetSectors'
  )
  const grainDirectoryOffsetSectors = parseS64b(
    buffer,
    56,
    'grainDirectoryOffsetSectors'
  )
  const overheadSectors = parseS64b(buffer, 64, 'overheadSectors')
  const compressionMethod = compressionMap[buffer.readUInt16LE(77)]
  // one grain table covers numGTEsPerGT grains of grainSizeSectors each
  const l1EntrySectors = numGTEsPerGT * grainSizeSectors
  return {
    flags,
    compressionMethod,
    grainSizeSectors,
    overheadSectors,
    capacitySectors,
    descriptorOffsetSectors,
    descriptorSizeSectors,
    grainDirectoryOffsetSectors,
    rGrainDirectoryOffsetSectors,
    l1EntrySectors,
    numGTEsPerGT,
  }
}
async function readGrain(offsetSectors, buffer, compressed) {
function readGrain(offsetSectors, buffer, compressed) {
const offset = offsetSectors * SECTOR_SIZE
const size = buffer.readUInt32LE(offset + 8)
const grainBuffer = buffer.slice(offset + 12, offset + 12 + size)
const grainContent = compressed
? await zlib.inflateSync(grainBuffer)
: grainBuffer
const grainContent = compressed ? zlib.inflateSync(grainBuffer) : grainBuffer
const lba = parseU64b(buffer, offset, 'l2Lba')
return {
offsetSectors: offsetSectors,
@@ -141,7 +55,7 @@ async function readGrain(offsetSectors, buffer, compressed) {
}
}
function tryToParseMarker(buffer) {
function parseMarker(buffer) {
const value = buffer.readUInt32LE(0)
const size = buffer.readUInt32LE(8)
const type = buffer.readUInt32LE(12)
@@ -153,7 +67,9 @@ function alignSectors(number) {
}
export default class VMDKDirectParser {
constructor(readStream) {
constructor(readStream, grainLogicalAddressList, grainFileOffsetList) {
this.grainLogicalAddressList = grainLogicalAddressList
this.grainFileOffsetList = grainFileOffsetList
this.virtualBuffer = new VirtualBuffer(readStream)
this.header = null
}
@@ -262,41 +178,60 @@ export default class VMDKDirectParser {
return this.header
}
async *blockIterator() {
while (!this.virtualBuffer.isDepleted) {
const position = this.virtualBuffer.position
const sector = await this.virtualBuffer.readChunk(
SECTOR_SIZE,
'marker start ' + position
async parseMarkedGrain(expectedLogicalAddress) {
const position = this.virtualBuffer.position
const sector = await this.virtualBuffer.readChunk(
SECTOR_SIZE,
'marker start ' + position
)
const marker = parseMarker(sector)
if (marker.size === 0) {
throw new Error(`expected grain marker, received ${marker}`)
} else if (marker.size > 10) {
const grainDiskSize = marker.size + 12
const alignedGrainDiskSize = alignSectors(grainDiskSize)
const remainOfBufferSize = alignedGrainDiskSize - SECTOR_SIZE
const remainderOfGrainBuffer = await this.virtualBuffer.readChunk(
remainOfBufferSize,
'grain remainder ' + this.virtualBuffer.position
)
if (sector.length === 0) {
break
}
const marker = tryToParseMarker(sector)
if (marker.size === 0) {
if (marker.value !== 0) {
await this.virtualBuffer.readChunk(
marker.value * SECTOR_SIZE,
'other marker value ' + this.virtualBuffer.position
)
}
} else if (marker.size > 10) {
const grainDiskSize = marker.size + 12
const alignedGrainDiskSize = alignSectors(grainDiskSize)
const remainOfBufferSize = alignedGrainDiskSize - SECTOR_SIZE
const remainderOfGrainBuffer = await this.virtualBuffer.readChunk(
remainOfBufferSize,
'grain remainder ' + this.virtualBuffer.position
const grainBuffer = Buffer.concat([sector, remainderOfGrainBuffer])
const grainObject = readGrain(
0,
grainBuffer,
this.header.compressionMethod === compressionDeflate &&
this.header.flags.compressedGrains
)
assert.strictEqual(grainObject.lba * SECTOR_SIZE, expectedLogicalAddress)
return grainObject.grain
}
}
async *blockIterator() {
for (
let tableIndex = 0;
tableIndex < this.grainFileOffsetList.length;
tableIndex++
) {
const position = this.virtualBuffer.position
const grainPosition = this.grainFileOffsetList[tableIndex]
const grainSizeBytes = this.header.grainSizeSectors * 512
const lba = this.grainLogicalAddressList[tableIndex]
// console.log('VMDK before blank', position, grainPosition,'lba', lba, 'tableIndex', tableIndex, 'grainFileOffsetList.length', this.grainFileOffsetList.length)
await this.virtualBuffer.readChunk(
grainPosition - position,
'blank before ' + position
)
let grain
if (this.header.flags.hasMarkers) {
grain = await this.parseMarkedGrain(lba)
} else {
grain = await this.virtualBuffer.readChunk(
grainSizeBytes,
'grain ' + this.virtualBuffer.position
)
const grainBuffer = Buffer.concat([sector, remainderOfGrainBuffer])
const grain = await readGrain(
0,
grainBuffer,
this.header.compressionMethod === compressionDeflate &&
this.header.flags.compressedGrains
)
yield { offsetBytes: grain.lbaBytes, data: grain.grain }
}
yield { logicalAddressBytes: lba, data: grain }
}
}
}

View File

@@ -51,18 +51,26 @@ test('VMDK to VHD can convert a random data file with VMDKDirectParser', async (
const reconvertedFromVmdk = 'from-vhd-by-vbox.raw'
const dataSize = 100 * 1024 * 1024 // this number is an integer head/cylinder/count equation solution
try {
await execa.shell(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName
await execa(
'base64 /dev/urandom | head -c ' + dataSize + ' > ' + inputRawFileName,
[],
{ shell: true }
)
await execa.shell(
await execa(
'python /usr/share/pyshared/VMDKstream.py ' +
inputRawFileName +
' ' +
vmdkFileName
vmdkFileName,
[],
{ shell: true }
)
const result = await readVmdkGrainTable(createFileAccessor(vmdkFileName))
const pipe = (
await convertFromVMDK(createReadStream(vmdkFileName), result)
await convertFromVMDK(
createReadStream(vmdkFileName),
result.grainLogicalAddressList,
result.grainFileOffsetList
)
).pipe(createWriteStream(vhdFileName))
await eventToPromise(pipe, 'finish')
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdFileName])

View File

@@ -251,6 +251,7 @@ const messages = {
stateEnabled: 'Enabled',
// ----- Labels -----
labelDisk: 'Disk',
labelMerge: 'Merge',
labelSize: 'Size',
labelSpeed: 'Speed',
@@ -261,6 +262,7 @@ const messages = {
// ----- Forms -----
formCancel: 'Cancel',
formCreate: 'Create',
formDescription: 'Description',
formEdit: 'Edit',
formId: 'ID',
formName: 'Name',
@@ -1432,6 +1434,12 @@ const messages = {
vmImportFileType: '{type} file:',
vmImportConfigAlert: 'Please check and/or modify the VM configuration.',
// ---- Disk import ---
diskImportFailed: 'Disk import failed',
diskImportSuccess: 'Disk import success',
dropDisksFiles: 'Drop VMDK or VHD files here to import disks.',
importToSr: 'To SR',
// ---- Tasks ---
cancelTask: 'Cancel',
destroyTask: 'Destroy',

View File

@@ -1468,6 +1468,33 @@ export const importVms = (vms, sr) =>
)
).then(ids => ids.filter(_ => _ !== undefined))
// Registers a disk import on the server, then uploads the file content to
// the URL the API returned. Resolves once the server-side import is done;
// throws the HTTP status code on upload failure.
const importDisk = async ({ description, file, name, type, vmdkData }, sr) => {
  const { $sendTo } = await _call('disk.import', {
    description,
    name,
    sr: resolveId(sr),
    type,
    vmdkData,
  })
  const response = await post($sendTo, file)
  if (response.status !== 200) {
    // surface the raw HTTP status so the caller can report the failure
    throw response.status
  }
  success(_('diskImportSuccess'), name)
  // the JSON body wraps the API call result, which itself settles when the
  // server has finished importing the disk
  const { result } = await response.json()
  await result
}
// Imports all given disks into the SR in parallel; each failure is shown to
// the user and still rejects the whole batch.
export const importDisks = (disks, sr) =>
  Promise.all(
    map(disks, async disk => {
      try {
        await importDisk(disk, sr)
      } catch (err) {
        error(_('diskImportFailed'), err)
        throw err
      }
    })
  )
import ExportVmModalBody from './export-vm-modal' // eslint-disable-line import/first
export const exportVm = vm =>
confirm({

View File

@@ -0,0 +1,184 @@
import _ from 'intl'
import ActionButton from 'action-button'
import Button from 'button'
import Collapse from 'collapse'
import decorate from 'apply-decorators'
import Dropzone from 'dropzone'
import fromEvent from 'promise-toolbox/fromEvent'
import Icon from 'icon'
import React from 'react'
import { Container } from 'grid'
import { formatSize } from 'utils'
import { generateId, linkState } from 'reaclette-utils'
import { importDisks } from 'xo'
import { injectIntl } from 'react-intl'
import { injectState, provideState } from 'reaclette'
import { InputCol, LabelCol, Row } from 'form-grid'
import { map } from 'lodash'
import { readCapacityAndGrainTable } from 'xo-vmdk-to-vhd'
import { SelectSr } from 'select-objects'
// Fresh form state: no files selected, no per-disk name/description
// overrides, no target SR, not currently parsing dropped files.
const getInitialState = () => ({
  disks: [],
  loadingDisks: false,
  mapDescriptions: {},
  mapNames: {},
  sr: undefined,
})
const DiskImport = decorate([
provideState({
initialState: getInitialState,
effects: {
handleDrop: async function(_, files) {
this.state.loadingDisks = true
const disks = await Promise.all(
map(files, async file => {
const { name } = file
const extIndex = name.lastIndexOf('.')
let type
if (
extIndex >= 0 &&
(type = name.slice(extIndex + 1)) &&
(type === 'vmdk' || type === 'vhd')
) {
let vmdkData
if (type === 'vmdk') {
const parsed = await readCapacityAndGrainTable(
async (start, end) => {
/* global FileReader */
const reader = new FileReader()
reader.readAsArrayBuffer(file.slice(start, end))
return (await fromEvent(reader, 'loadend')).target.result
}
)
const table = await parsed.tablePromise
vmdkData = {
grainLogicalAddressList: table.grainLogicalAddressList,
grainFileOffsetList: table.grainFileOffsetList,
capacity: parsed.capacityBytes,
}
}
return {
id: generateId(),
file,
name,
sr: this.state.sr,
type,
vmdkData,
}
}
})
)
return { disks, loadingDisks: false }
},
import: () => async ({ disks, mapDescriptions, mapNames, sr }) => {
await importDisks(
disks.map(({ id, name, ...disk }) => ({
...disk,
name: mapNames[id] || name,
description: mapDescriptions[id],
})),
sr
)
},
linkState,
onChangeDescription: (_, { target: { name, value } }) => ({
mapDescriptions,
}) => {
mapDescriptions[name] = value
return { mapDescriptions }
},
onChangeName: (_, { target: { name, value } }) => ({ mapNames }) => {
mapNames[name] = value
return { mapNames }
},
onChangeSr: (_, sr) => ({ sr }),
reset: getInitialState,
},
}),
injectIntl,
injectState,
({
effects,
state: { disks, loadingDisks, mapDescriptions, mapNames, sr },
}) => (
<Container>
<form id='import-form'>
<Row>
<LabelCol>{_('importToSr')}</LabelCol>
<InputCol>
<SelectSr onChange={effects.onChangeSr} required value={sr} />
</InputCol>
</Row>
{sr !== undefined && (
<div>
<Dropzone
onDrop={effects.handleDrop}
message={_('dropDisksFiles')}
/>
{loadingDisks && <Icon icon='loading' />}
{disks.length > 0 && (
<div>
<div>
{disks.map(({ file: { name, size }, id }) => (
<Collapse
buttonText={`${name} - ${formatSize(size)}`}
key={id}
size='small'
className='mb-1'
>
<div className='mt-1'>
<Row>
<LabelCol>{_('formName')}</LabelCol>
<InputCol>
<input
className='form-control'
name={id}
onChange={effects.onChangeName}
placeholder={name}
type='text'
value={mapNames[id]}
/>
</InputCol>
</Row>
<Row>
<LabelCol>{_('formDescription')}</LabelCol>
<InputCol>
<input
className='form-control'
name={id}
onChange={effects.onChangeDescription}
type='text'
value={mapDescriptions[id]}
/>
</InputCol>
</Row>
</div>
</Collapse>
))}
</div>
<div className='form-group pull-right'>
<ActionButton
btnStyle='primary'
className='mr-1'
form='import-form'
handler={effects.import}
icon='import'
redirectOnSuccess={`/srs/${sr.id}/disks`}
type='submit'
>
{_('newImport')}
</ActionButton>
<Button onClick={effects.reset}>{_('formReset')}</Button>
</div>
</div>
)}
</div>
)}
</form>
</Container>
),
])
export { DiskImport as default }

View File

@@ -0,0 +1,43 @@
import _ from 'intl'
import Icon from 'icon'
import Page from '../page'
import React from 'react'
import { Col, Container, Row } from 'grid'
import { NavLink, NavTabs } from 'nav'
import { routes } from 'utils'
import DiskImport from '../disk-import'
import VmImport from '../vm-import'
// Page header shared by both /import tabs: the page title plus the
// VM / disk navigation tabs.
const HEADER = (
  <Container>
    <Row>
      <Col mediumSize={3}>
        <h2>
          <Icon icon='import' /> {_('newImport')}
        </h2>
      </Col>
      <Col mediumSize={9}>
        <NavTabs className='pull-right'>
          <NavLink to='/import/vm'>
            <Icon icon='vm' /> {_('labelVm')}
          </NavLink>
          <NavLink to='/import/disk'>
            <Icon icon='disk' /> {_('labelDisk')}
          </NavLink>
        </NavTabs>
      </Col>
    </Row>
  </Container>
)
// /import route container: renders the shared header and dispatches to the
// VM import tab (/import/vm) or the disk import tab (/import/disk).
const Import = routes('import', {
  disk: DiskImport,
  vm: VmImport,
})(({ children }) => (
  <Page header={HEADER} title='newImport' formatTitle>
    {children}
  </Page>
))

View File

@@ -39,10 +39,10 @@ import Sr from './sr'
import Tasks from './tasks'
import User from './user'
import Vm from './vm'
import VmImport from './vm-import'
import Xoa from './xoa'
import XoaUpdates from './xoa/update'
import Xosan from './xosan'
import Import from './import'
import keymap, { help } from '../keymap'
@@ -98,11 +98,11 @@ const BODY_STYLE = {
'srs/:id': Sr,
tasks: Tasks,
user: User,
'vms/import': VmImport,
'vms/new': NewVm,
'vms/:id': Vm,
xoa: Xoa,
xosan: Xosan,
import: Import,
hub: Hub,
})
@connectStore(state => {

View File

@@ -342,6 +342,23 @@ export default class Menu extends Component {
pill: nTasks,
},
isAdmin && { to: '/xosan', icon: 'menu-xosan', label: 'xosan' },
!noOperatablePools && {
to: '/import/vm',
icon: 'menu-new-import',
label: 'newImport',
subMenu: [
{
to: '/import/vm',
icon: 'vm',
label: 'labelVm',
},
{
to: '/import/disk',
icon: 'disk',
label: 'labelDisk',
},
],
},
!(noOperatablePools && noResourceSets) && {
to: '/vms/new',
icon: 'menu-new',
@@ -365,11 +382,6 @@ export default class Menu extends Component {
icon: 'menu-settings-servers',
label: 'newServerPage',
},
!noOperatablePools && {
to: '/vms/import',
icon: 'menu-new-import',
label: 'newImport',
},
],
},
]

View File

@@ -4,7 +4,6 @@ import ActionButton from 'action-button'
import Button from 'button'
import Component from 'base-component'
import Dropzone from 'dropzone'
import Icon from 'icon'
import isEmpty from 'lodash/isEmpty'
import map from 'lodash/map'
import orderBy from 'lodash/orderBy'
@@ -22,7 +21,6 @@ import {
import { connectStore, formatSize, mapPlus, noop } from 'utils'
import { SelectNetwork, SelectPool, SelectSr } from 'select-objects'
import Page from '../page'
import parseOvaFile from './ova'
import styles from './index.css'
@@ -34,18 +32,6 @@ const FORMAT_TO_HANDLER = {
xva: noop,
}
const HEADER = (
<Container>
<Row>
<Col>
<h2>
<Icon icon='import' /> {_('newImport')}
</h2>
</Col>
</Row>
</Container>
)
// ===================================================================
@connectStore(
@@ -325,103 +311,99 @@ export default class Import extends Component {
const { pool, sr, srPredicate, vms } = this.state
return (
<Page header={HEADER} title='newImport' formatTitle>
<Container>
<form id='import-form'>
<FormGrid.Row>
<FormGrid.LabelCol>{_('vmImportToPool')}</FormGrid.LabelCol>
<FormGrid.InputCol>
<SelectPool
value={pool}
onChange={this._handleSelectedPool}
required
/>
</FormGrid.InputCol>
</FormGrid.Row>
<FormGrid.Row>
<FormGrid.LabelCol>{_('vmImportToSr')}</FormGrid.LabelCol>
<FormGrid.InputCol>
<SelectSr
disabled={!pool}
onChange={this._handleSelectedSr}
predicate={srPredicate}
required
value={sr}
/>
</FormGrid.InputCol>
</FormGrid.Row>
{sr && (
<div>
<Dropzone
onDrop={this._handleDrop}
message={_('importVmsList')}
/>
<hr />
<h5>{_('vmsToImport')}</h5>
{vms.length > 0 ? (
<div>
{map(vms, ({ data, error, file, type }, vmIndex) => (
<div key={file.preview} className={styles.vmContainer}>
<strong>{file.name}</strong>
<span className='pull-right'>
<strong>{`(${formatSize(file.size)})`}</strong>
</span>
{!error ? (
data && (
<div>
<hr />
<div className='alert alert-info' role='alert'>
<strong>
{_('vmImportFileType', { type })}
</strong>{' '}
{_('vmImportConfigAlert')}
</div>
<VmData
{...data}
ref={`vm-data-${vmIndex}`}
pool={pool}
/>
</div>
)
) : (
<Container>
<form id='import-form'>
<FormGrid.Row>
<FormGrid.LabelCol>{_('vmImportToPool')}</FormGrid.LabelCol>
<FormGrid.InputCol>
<SelectPool
value={pool}
onChange={this._handleSelectedPool}
required
/>
</FormGrid.InputCol>
</FormGrid.Row>
<FormGrid.Row>
<FormGrid.LabelCol>{_('vmImportToSr')}</FormGrid.LabelCol>
<FormGrid.InputCol>
<SelectSr
disabled={!pool}
onChange={this._handleSelectedSr}
predicate={srPredicate}
required
value={sr}
/>
</FormGrid.InputCol>
</FormGrid.Row>
{sr && (
<div>
<Dropzone
onDrop={this._handleDrop}
message={_('importVmsList')}
/>
<hr />
<h5>{_('vmsToImport')}</h5>
{vms.length > 0 ? (
<div>
{map(vms, ({ data, error, file, type }, vmIndex) => (
<div key={file.preview} className={styles.vmContainer}>
<strong>{file.name}</strong>
<span className='pull-right'>
<strong>{`(${formatSize(file.size)})`}</strong>
</span>
{!error ? (
data && (
<div>
<hr />
<div className='alert alert-danger' role='alert'>
<strong>{_('vmImportError')}</strong>{' '}
{(error && error.message) ||
_('noVmImportErrorDescription')}
<div className='alert alert-info' role='alert'>
<strong>{_('vmImportFileType', { type })}</strong>{' '}
{_('vmImportConfigAlert')}
</div>
<VmData
{...data}
ref={`vm-data-${vmIndex}`}
pool={pool}
/>
</div>
)}
</div>
))}
</div>
) : (
<p>{_('noSelectedVms')}</p>
)}
<hr />
<div className='form-group pull-right'>
<ActionButton
btnStyle='primary'
disabled={!vms.length}
className='mr-1'
form='import-form'
handler={this._import}
icon='import'
redirectOnSuccess={getRedirectionUrl}
type='submit'
>
{_('newImport')}
</ActionButton>
<Button onClick={this._handleCleanSelectedVms}>
{_('importVmsCleanList')}
</Button>
)
) : (
<div>
<hr />
<div className='alert alert-danger' role='alert'>
<strong>{_('vmImportError')}</strong>{' '}
{(error && error.message) ||
_('noVmImportErrorDescription')}
</div>
</div>
)}
</div>
))}
</div>
) : (
<p>{_('noSelectedVms')}</p>
)}
<hr />
<div className='form-group pull-right'>
<ActionButton
btnStyle='primary'
disabled={!vms.length}
className='mr-1'
form='import-form'
handler={this._import}
icon='import'
redirectOnSuccess={getRedirectionUrl}
type='submit'
>
{_('newImport')}
</ActionButton>
<Button onClick={this._handleCleanSelectedVms}>
{_('importVmsCleanList')}
</Button>
</div>
)}
</form>
</Container>
</Page>
</div>
)}
</form>
</Container>
)
}
}