feat: support VDI export in VMDK (#5982)

Co-authored-by: Rajaa.BARHTAOUI <rajaa.barhtaoui@gmail.com>
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
Co-authored-by: Florent BEAUCHAMP <flo850@free.fr>
This commit is contained in:
Nicolas Raynaud
2022-01-16 18:40:08 +01:00
committed by GitHub
parent 422a22a767
commit 9375b1c8bd
11 changed files with 409 additions and 13 deletions

View File

@@ -12,6 +12,7 @@
- [Proxy] Now ships a reverse proxy [PR#6072](https://github.com/vatesfr/xen-orchestra/pull/6072)
- [Delta Backup] When using S3 remote, retry uploading VHD parts on Internal Error to support [Backblaze](https://www.backblaze.com/b2/docs/calling.html#error_handling) (PR [#6086](https://github.com/vatesfr/xen-orchestra/issues/6086)) (Forum [5397](https://xcp-ng.org/forum/topic/5397/delta-backups-failing-aws-s3-uploadpartcopy-cpu-too-busy/5))
- [Backup] Add sanity check of aliases on S3 remotes (PR [6043](https://github.com/vatesfr/xen-orchestra/pull/6043))
- [Export/Disks] Allow the export of disks in VMDK format (PR [#5982](https://github.com/vatesfr/xen-orchestra/pull/5982))
### Bug fixes
@@ -38,7 +39,9 @@
- @xen-orchestra/fs minor
- vhd-lib minor
- xo-vmdk-to-vhd minor
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli minor
- @xen-orchestra/proxy minor
- xo-server minor
- xo-web minor

View File

@@ -79,9 +79,12 @@ create.resolve = {
// -------------------------------------------------------------------
// -- removed implementation: always exported as VHD and cancelled the stream on close
async function handleExportContent(req, res, { xapi, id }) {
const stream = await xapi.exportVdiContent(id)
req.on('close', () => stream.cancel())
// -- new implementation: supported export format identifiers
const VHD = 'vhd'
const VMDK = 'vmdk'
// Serve the VDI content over HTTP: VMDK when `format === VMDK`, VHD otherwise.
async function handleExportContent(req, res, { xapi, id, filename, format }) {
const stream = format === VMDK ? await xapi.exportVdiAsVmdk(id, filename) : await xapi.exportVdiContent(id)
// stop the export if the client disconnects
req.on('close', () => stream.destroy())
// Remove the filename as it is already part of the URL.
stream.headers['content-disposition'] = 'attachment'
@@ -94,16 +97,19 @@ async function handleExportContent(req, res, { xapi, id }) {
})
}
// -- removed signature (no format parameter)
export async function exportContent({ vdi }) {
// -- new implementation: optional `format` parameter, defaults to VHD
export async function exportContent({ vdi, format = VHD }) {
// file name presented to the user, with the extension matching the chosen format
const filename = (vdi.name_label || 'unknown') + '.' + (format === VHD ? 'vhd' : 'vmdk')
return {
$getFrom: await this.registerHttpRequest(
handleExportContent,
{
id: vdi._xapiId,
xapi: this.getXapi(vdi),
filename,
format,
},
{
// (first `suffix` line is the removed version, second the new one)
suffix: `/${encodeURIComponent(vdi.name_label)}.vhd`,
suffix: `/${encodeURIComponent(filename)}`,
}
),
}
@@ -112,6 +118,7 @@ export async function exportContent({ vdi }) {
exportContent.description = 'export the content of a VDI'
exportContent.params = {
id: { type: 'string' },
// new optional parameter restricted to the two supported formats
format: { eq: [VMDK, VHD], optional: true },
}
exportContent.resolve = {
vdi: ['id', ['VDI', 'VDI-snapshot'], 'view'],

View File

@@ -20,8 +20,8 @@ import semver from 'semver'
import tarStream from 'tar-stream'
import uniq from 'lodash/uniq.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { vmdkToVhd } from 'xo-vmdk-to-vhd'
import { cancelable, fromEvents, ignoreErrors, pCatch, pRetry } from 'promise-toolbox'
import { vmdkToVhd, vhdToVMDK } from 'xo-vmdk-to-vhd'
import { cancelable, CancelToken, fromEvents, ignoreErrors, pCatch, pRetry } from 'promise-toolbox'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { defer as deferrable } from 'golike-defer'
@@ -1707,6 +1707,24 @@ export default class Xapi extends XapiBase {
})
}
async exportVdiAsVmdk(vdi, filename, { cancelToken = CancelToken.none, base } = {}) {
vdi = this.getObject(vdi)
const params = { cancelToken, format: VDI_FORMAT_VHD }
if (base !== undefined) {
params.base = base
}
const vhdResult = await this.VDI_exportContent(vdi.$ref, params)
const vmdkStream = await vhdToVMDK(filename, vhdResult)
// callers expect the stream to be an HTTP response.
vmdkStream.headers = {
...vhdResult.headers,
'content-type': 'application/x-vmdk',
}
vmdkStream.statusCode = vhdResult.statusCode
vmdkStream.statusMessage = vhdResult.statusMessage
return vmdkStream
}
@cancelable
exportVdiContent($cancelToken, vdi, { format } = {}) {
return this._exportVdi($cancelToken, this.getObject(vdi), undefined, format)

View File

@@ -1,5 +1,12 @@
// Size in bytes of a disk sector, the base unit for all VMDK offsets and sizes.
export const SECTOR_SIZE = 512
// Values of the header's compression algorithm field (index = on-disk value).
export const compressionDeflate = 'COMPRESSION_DEFLATE'
export const compressionNone = 'COMPRESSION_NONE'
// Marker types used between sections of a stream-optimized VMDK file.
export const MARKER_EOS = 0
export const MARKER_GT = 1
export const MARKER_GD = 2
export const MARKER_FOOTER = 3
// maps the on-disk compression value to its symbolic name
const compressionMap = [compressionNone, compressionDeflate]
export function parseFlags(flagBuffer) {
@@ -71,3 +78,40 @@ export function unpackHeader(buffer) {
numGTEsPerGT,
}
}
/**
 * Build the 512-byte SparseExtentHeader of a stream-optimized VMDK file.
 *
 * The same routine produces both the header (grain directory offset unknown,
 * hence the -1 default) and the footer (real directory offset supplied).
 *
 * @param capacitySectors total virtual disk capacity, in sectors
 * @param descriptorSizeSectors size of the embedded descriptor, in sectors
 * @param grainDirectoryOffsetSectors where the grain directory lives, or -1 when not yet known
 * @returns {{buffer: Buffer, grainDirectorySizeSectors: number, grainTableSizeSectors: number, grainDirectoryEntries: number, grainTableEntries: number}}
 */
export function createStreamOptimizedHeader(capacitySectors, descriptorSizeSectors, grainDirectoryOffsetSectors = -1) {
  const grainSizeSectors = 128
  const numGTEsPerGT = 512
  const descriptorOffsetSectors = 1
  // derived table geometry (4 bytes per entry, rounded up to whole sectors)
  const grainDirectoryEntries = Math.ceil(Math.ceil(capacitySectors / grainSizeSectors) / numGTEsPerGT)
  const grainTableEntries = grainDirectoryEntries * numGTEsPerGT
  const grainDirectorySizeSectors = Math.ceil((grainDirectoryEntries * 4) / SECTOR_SIZE)
  const grainTableSizeSectors = Math.ceil((grainTableEntries * 4) / SECTOR_SIZE)
  const overheadSectors = 1 + descriptorSizeSectors + grainDirectorySizeSectors + grainTableSizeSectors

  const headerBuffer = Buffer.alloc(SECTOR_SIZE)
  headerBuffer.write('KDMV', 0, 4, 'ascii') // magic number
  headerBuffer.writeUInt32LE(3, 4) // version
  // flags: valid newline detection (bit 0), compressed grains (bit 16), markers (bit 17)
  headerBuffer.writeUInt32LE(1 | (1 << 16) | (1 << 17), 8)
  headerBuffer.writeBigUInt64LE(BigInt(capacitySectors), 12)
  headerBuffer.writeBigUInt64LE(BigInt(grainSizeSectors), 20)
  headerBuffer.writeBigUInt64LE(BigInt(descriptorOffsetSectors), 28)
  headerBuffer.writeBigUInt64LE(BigInt(descriptorSizeSectors), 36)
  headerBuffer.writeUInt32LE(numGTEsPerGT, 44)
  // rgdOffset (offset 48) stays 0: it is ignored because bit 1 of the flags field is not set.
  headerBuffer.writeBigInt64LE(BigInt(grainDirectoryOffsetSectors), 56)
  headerBuffer.writeBigInt64LE(BigInt(overheadSectors), 64)
  // newline mangling detector
  headerBuffer.write('\n \r\n', 73, 4, 'ascii')
  // use DEFLATE
  headerBuffer.writeUInt16LE(1, 77)
  return {
    buffer: headerBuffer,
    grainDirectorySizeSectors,
    grainTableSizeSectors,
    grainDirectoryEntries,
    grainTableEntries,
  }
}

View File

@@ -1,7 +1,9 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import createReadableSparseStream from 'vhd-lib/createReadableSparseStream.js'
import { parseOVAFile, ParsableFile } from './ova'
import VMDKDirectParser from './vmdk-read'
import { generateVmdkData } from './vmdk-generate'
import { parseVhdToBlocks } from './parseVhdToBlocks.js'
export { default as readVmdkGrainTable, readCapacityAndGrainTable } from './vmdk-read-table'
@@ -24,4 +26,15 @@ async function vmdkToVhd(vmdkReadStream, grainLogicalAddressList, grainFileOffse
)
}
export { ParsableFile, parseOVAFile, vmdkToVhd }
/**
*
* @param diskName
* @param vhdReadStream
* @returns a readable stream representing a VMDK file
*/
async function vhdToVMDK(diskName, vhdReadStream) {
const { blockSize, blocks, diskSize, geometry } = await parseVhdToBlocks(vhdReadStream)
return asyncIteratorToStream(generateVmdkData(diskName, diskSize, blockSize, blocks, geometry))
}
export { ParsableFile, parseOVAFile, vmdkToVhd, vhdToVMDK }

View File

@@ -0,0 +1,43 @@
import assert from 'assert'
import { parseVhdStream } from 'vhd-lib'
// Pull items from `iterator` until one of type `type` shows up, silently
// discarding items of `skipableType` on the way; throws if the iterator
// ends first or yields an unexpected type.
async function next(iterator, type, skipableType) {
  for (;;) {
    const { done, value } = await iterator.next()
    assert.strictEqual(done, false, 'iterator is done')
    if (value.type !== skipableType) {
      assert.strictEqual(value.type, type)
      return value
    }
  }
}
// Filter a VHD item stream down to its data blocks, reshaping each one into
// the { lba, block } form expected by generateVmdkData (the property is
// renamed and the block bitmap dropped on the way).
async function* onlyBlocks(iterable, blockSize) {
  for await (const { type, id, data } of iterable) {
    if (type === 'block') {
      yield { lba: id * blockSize, block: data }
    }
  }
}
/**
 * Read a VHD stream and expose its metadata together with an async
 * generator of its data blocks (in the shape generateVmdkData expects).
 *
 * @param vhdStream readable stream of a VHD file
 * @returns {{blockSize, blockCount, diskSize, geometry, blocks}}
 */
export async function parseVhdToBlocks(vhdStream) {
  const items = parseVhdStream(vhdStream)
  const { footer } = await next(items, 'footer')
  const { header } = await next(items, 'header')
  // parent locators may appear before the BAT; skip them
  const { blockCount } = await next(items, 'bat', 'parentLocator')
  const { blockSize } = header
  return {
    blockSize,
    blockCount,
    diskSize: footer.currentSize,
    geometry: footer.diskGeometry,
    blocks: onlyBlocks(items, blockSize),
  }
}

View File

@@ -0,0 +1,163 @@
import * as assert from 'assert'
import zlib from 'zlib'
import {
SECTOR_SIZE,
createStreamOptimizedHeader,
unpackHeader,
MARKER_GT,
MARKER_GD,
MARKER_FOOTER,
MARKER_EOS,
} from './definitions'
/**
 * Generate a stream-optimized VMDK file from a stream of data blocks.
 *
 * - block is an input bunch of bytes, VHD default size is 2MB
 * - grain is an output (VMDK) bunch of bytes, VMDK default is 64KB
 * expected ratio for default values is 16 VMDK grains for one VHD block
 * this function errors if blockSize < grainSize
 * The generated file is streamoptimized, compressed grains, tables at the end.
 * @param diskName name written into the embedded descriptor
 * @param diskCapacityBytes virtual capacity of the disk, in bytes
 * @param blockSizeBytes size of each input block (must be a non-zero multiple of the grain size)
 * @param blockGenerator async generator of {lba:Number, block:Buffer} objects.
 * @param geometry an object of shape {sectorsPerTrackCylinder,heads,cylinders}
 * @returns an Async generator of Buffers representing the VMDK file fragments
 */
export async function generateVmdkData(
diskName,
diskCapacityBytes,
blockSizeBytes,
blockGenerator,
geometry = {
sectorsPerTrackCylinder: 63,
heads: 16,
cylinders: 10402,
}
) {
// random 32-bit content id stored in the descriptor
const cid = Math.floor(Math.random() * Math.pow(2, 32))
const diskCapacitySectors = Math.ceil(diskCapacityBytes / SECTOR_SIZE)
// Virtual Box can't parse indented descriptors
const descriptor = `# Disk DescriptorFile
version=1
CID=${cid}
parentCID=ffffffff
createType="streamOptimized"
# Extent description
RW ${diskCapacitySectors} SPARSE "${diskName}"
# The Disk Data Base
#DDB
ddb.adapterType = "ide"
ddb.geometry.sectors = "${geometry.sectorsPerTrackCylinder}"
ddb.geometry.heads = "${geometry.heads}"
ddb.geometry.cylinders = "${geometry.cylinders}"
`
// the descriptor occupies whole sectors, zero-padded at the end
const utf8Descriptor = Buffer.from(descriptor, 'utf8')
const descriptorSizeSectors = Math.ceil(utf8Descriptor.length / SECTOR_SIZE)
const descriptorBuffer = Buffer.alloc(descriptorSizeSectors * SECTOR_SIZE)
utf8Descriptor.copy(descriptorBuffer)
// grain directory offset is not known yet; the footer will carry the real one
const headerData = createStreamOptimizedHeader(diskCapacitySectors, descriptorSizeSectors)
const parsedHeader = unpackHeader(headerData.buffer)
const grainSizeBytes = parsedHeader.grainSizeSectors * SECTOR_SIZE
if (blockSizeBytes % grainSizeBytes !== 0 || blockSizeBytes === 0) {
throw new Error(
`createReadableVmdkStream can only accept block size multiple of ${grainSizeBytes}, got ${blockSizeBytes}`
)
}
// grain table, filled as grains are emitted and written near the end of the file
const grainTableEntries = headerData.grainTableEntries
const tableBuffer = Buffer.alloc(grainTableEntries * 4)
// current byte offset in the generated file, maintained by track()
let streamPosition = 0
let directoryOffset = 0
const roundToSector = value => Math.ceil(value / SECTOR_SIZE) * SECTOR_SIZE
// account for an emitted buffer; every fragment must stay sector-aligned
function track(buffer) {
assert.equal(streamPosition % SECTOR_SIZE, 0)
if (buffer.length > 0) {
streamPosition += buffer.length
}
return buffer
}
// a marker is a single sector announcing the next section (or the end of stream)
function createEmptyMarker(type) {
const buff = Buffer.alloc(SECTOR_SIZE)
buff.writeBigUInt64LE(BigInt(0), 0)
buff.writeUInt32LE(0, 8)
buff.writeUInt32LE(type, 12)
return buff
}
// grain directory: one sector offset per grain table, tables laid out contiguously
function createDirectoryBuffer(grainDirectoryEntries, tablePosition) {
const OFFSET_SIZE = 4
// remember where the directory lands so the footer can point at it
directoryOffset = streamPosition
const buff = Buffer.alloc(roundToSector(grainDirectoryEntries * OFFSET_SIZE))
for (let i = 0; i < grainDirectoryEntries; i++) {
buff.writeUInt32LE((tablePosition + i * parsedHeader.numGTEsPerGT * OFFSET_SIZE) / SECTOR_SIZE, i * OFFSET_SIZE)
}
return buff
}
// true when the buffer contains only zero bytes
function bufferIsBlank(buffer) {
for (const b of buffer) {
if (b !== 0) {
return false
}
}
return true
}
// deflate one grain and prefix it with its marker (LBA in sectors + compressed size)
function createMarkedGrain(lbaBytes, buffer) {
assert.strictEqual(buffer.length, grainSizeBytes)
assert.strictEqual(lbaBytes % grainSizeBytes, 0)
const markerOverHead = 12
const compressed = zlib.deflateSync(buffer, { level: 9 })
const outputBuffer = Buffer.alloc(roundToSector(markerOverHead + compressed.length))
compressed.copy(outputBuffer, markerOverHead)
outputBuffer.writeBigUInt64LE(BigInt(lbaBytes / SECTOR_SIZE), 0)
outputBuffer.writeUInt32LE(compressed.length, 8)
return outputBuffer
}
// split one input block into grains; blank (all-zero) grains are not emitted
// and keep a 0 entry in the grain table
async function* emitBlock(blockLbaBytes, buffer, grainSizeBytes) {
assert.strictEqual(buffer.length % grainSizeBytes, 0)
const grainCount = buffer.length / grainSizeBytes
for (let i = 0; i < grainCount; i++) {
const grainLbaBytes = blockLbaBytes + i * grainSizeBytes
const tableIndex = grainLbaBytes / grainSizeBytes
const grainData = buffer.slice(i * grainSizeBytes, (i + 1) * grainSizeBytes)
if (!bufferIsBlank(grainData)) {
// record the grain's file position (in sectors) before emitting it
tableBuffer.writeUInt32LE(streamPosition / SECTOR_SIZE, tableIndex * 4)
yield track(createMarkedGrain(grainLbaBytes, grainData))
}
}
}
async function* emitBlocks(grainSize, blockGenerator) {
for await (const b of blockGenerator) {
yield* emitBlock(b.lba, b.block, grainSize)
}
}
// file layout: header, descriptor, grains, grain table, grain directory,
// footer (header repeated with the real directory offset), end-of-stream
async function* iterator() {
yield track(headerData.buffer)
yield track(descriptorBuffer)
yield* emitBlocks(grainSizeBytes, blockGenerator)
yield track(createEmptyMarker(MARKER_GT))
const tableOffset = streamPosition
yield track(tableBuffer)
yield track(createEmptyMarker(MARKER_GD))
yield track(createDirectoryBuffer(headerData.grainDirectoryEntries, tableOffset))
yield track(createEmptyMarker(MARKER_FOOTER))
const footer = createStreamOptimizedHeader(
diskCapacitySectors,
descriptorSizeSectors,
directoryOffset / SECTOR_SIZE
)
yield track(footer.buffer)
yield track(createEmptyMarker(MARKER_EOS))
}
return iterator()
}

View File

@@ -9,6 +9,9 @@ import tmp from 'tmp'
import { createReadStream, createWriteStream, stat } from 'fs-extra'
import { pFromCallback } from 'promise-toolbox'
import { vmdkToVhd, readVmdkGrainTable } from '.'
import VMDKDirectParser from './vmdk-read'
import { generateVmdkData } from './vmdk-generate'
import asyncIteratorToStream from 'async-iterator-to-stream'
// working directory when the test file loads — presumably restored by tests
// that chdir into temp dirs (TODO confirm against the rest of the file)
const initialDir = process.cwd()
// generating images and running qemu-img checks can be slow
jest.setTimeout(100000)
@@ -80,3 +83,48 @@ test('VMDK to VHD can convert a random data file with VMDKDirectParser', async (
throw error
}
})
// Smoke test: a VMDK generated from zero data blocks must still be a valid
// image according to `qemu-img check`.
test('Can generate an empty VMDK file', async () => {
// 1 GiB capacity, 1 MiB block size, no blocks
const readStream = asyncIteratorToStream(await generateVmdkData('result.vmdk', 1024 * 1024 * 1024, 1024 * 1024, []))
const pipe = readStream.pipe(createWriteStream('result.vmdk'))
await fromEvent(pipe, 'finish')
await execa('qemu-img', ['check', 'result.vmdk'])
})
// Round-trip test: generate a VMDK from two 1 MiB blocks, parse it back with
// VMDKDirectParser, check that grain LBAs and data survive the conversion,
// then let qemu-img validate the file structure.
test('Can generate a small VMDK file', async () => {
  // a 1 MiB VHD block maps onto 16 VMDK grains of 64 KiB each
  const defaultVhdToVmdkRatio = 16
  const blockSize = 1024 * 1024
  // Use deterministic, non-zero content: the generator elides blank (all-zero)
  // grains, so Buffer.allocUnsafe's arbitrary memory could occasionally make
  // the expected LBA list below wrong (flaky test).
  const b1 = Buffer.alloc(blockSize, 1)
  const b2 = Buffer.alloc(blockSize, 2)
  const blockGenerator = [
    { lba: 0, block: b1 },
    { lba: blockSize, block: b2 },
  ]
  const fileName = 'result.vmdk'
  const geometry = { sectorsPerTrackCylinder: 63, heads: 16, cylinders: 10402 }
  const readStream = asyncIteratorToStream(
    await generateVmdkData(fileName, 2 * blockSize, blockSize, blockGenerator, geometry)
  )
  const pipe = readStream.pipe(createWriteStream(fileName))
  await fromEvent(pipe, 'finish')
  // every grain of both blocks must be present, in order
  const expectedLBAs = []
  for (let i = 0; i < blockGenerator.length; i++) {
    for (let j = 0; j < defaultVhdToVmdkRatio; j++) {
      expectedLBAs.push(expectedLBAs.length)
    }
  }
  const data = await readVmdkGrainTable(createFileAccessor(fileName))
  expect(bufferToArray(data.grainLogicalAddressList)).toEqual(expectedLBAs)
  const grainFileOffsetList = bufferToArray(data.grainFileOffsetList)
  const parser = new VMDKDirectParser(
    createReadStream(fileName),
    bufferToArray(data.grainLogicalAddressList),
    grainFileOffsetList,
    false
  )
  await parser.readHeader()
  const resLbas = []
  const resBuffers = []
  const DEFAULT_GRAIN_SIZE = 65536
  for await (const b of parser.blockIterator()) {
    resLbas.push(b.logicalAddressBytes / DEFAULT_GRAIN_SIZE)
    resBuffers.push(b.data)
  }
  // the reassembled data must equal the original input blocks
  const resultBuffer = Buffer.concat(resBuffers)
  const startingBuffer = Buffer.concat([b1, b2])
  expect(resultBuffer).toEqual(startingBuffer)
  expect(resLbas).toEqual(expectedLBAs)
  await execa('qemu-img', ['check', fileName])
})

View File

@@ -1132,6 +1132,7 @@ const messages = {
destroySelectedVdis: 'Destroy selected VDIs',
destroyVdi: 'Destroy VDI',
exportVdi: 'Export VDI content',
format: 'Format',
importVdi: 'Import VDI content',
importVdiNoFile: 'No file selected',
selectVdiMessage: 'Drop VHD file here',
@@ -1141,6 +1142,8 @@ const messages = {
warningVdiSr: "The VDIs' SRs must either be shared or on the same host for the VM to be able to start.",
removeSelectedVdisFromVm: 'Remove selected VDIs from this VM',
removeVdiFromVm: 'Remove VDI from this VM',
vhd: 'VHD',
vmdk: 'VMDK',
// ----- VM network tab -----

View File

@@ -0,0 +1,48 @@
import BaseComponent from 'base-component'
import React from 'react'
import _ from '../../intl'
import { Container, Row, Col } from '../../grid'
import { Select } from '../../form'
// Format choices offered by the modal; labels are i18n message ids (vhd/vmdk).
const OPTIONS = [
{
label: _('vhd'),
value: 'vhd',
},
{
label: _('vmdk'),
value: 'vmdk',
},
]
// Modal body letting the user pick the export format for a VDI.
// The selected format ('vhd' or 'vmdk') is exposed through the `value`
// getter, which is read by the confirm() helper when the modal is validated.
export default class ExportVdiModalBody extends BaseComponent {
state = {
// default export format
format: 'vhd',
}
// result of the modal
get value() {
return this.state.format
}
render() {
return (
<Container>
<Row>
<Col mediumSize={6}>
<strong>{_('format')}</strong>
</Col>
<Col mediumSize={6}>
<Select
labelKey='label'
options={OPTIONS}
required
simpleValue
onChange={this.linkState('format')}
value={this.state.format}
/>
</Col>
</Row>
</Container>
)
}
}

View File

@@ -1716,10 +1716,16 @@ export const exportVm = async vm => {
window.open(`.${url}`)
}
export const exportVdi = vdi => {
const id = resolveId(vdi)
info(_('startVdiExport'), id)
return _call('disk.exportContent', { id }).then(({ $getFrom: url }) => {
import ExportVdiModalBody from './export-vdi-modal' // eslint-disable-line import/first
export const exportVdi = async vdi => {
const format = await confirm({
body: <ExportVdiModalBody />,
icon: 'export',
title: _('exportVdi'),
})
info(_('startVdiExport'), vdi.id)
return _call('disk.exportContent', { id: resolveId(vdi), format }).then(({ $getFrom: url }) => {
window.open(`.${url}`)
})
}