diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index 8b0373858..38c4c0ad8 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -6,6 +6,7 @@ - [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053)) - [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059)) - [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050)) +- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726)) ### Bug fixes @@ -17,6 +18,7 @@ ### Released packages +- vhd-lib v0.6.0 - @xen-orchestra/fs v0.8.0 - xo-server v5.38.0 - xo-web v5.38.0 diff --git a/packages/vhd-lib/merge.integ.spec.js b/packages/vhd-lib/merge.integ.spec.js index 93a236cb4..f9e342cef 100644 --- a/packages/vhd-lib/merge.integ.spec.js +++ b/packages/vhd-lib/merge.integ.spec.js @@ -1,38 +1,40 @@ /* eslint-env jest */ +import asyncIteratorToStream from 'async-iterator-to-stream' import execa from 'execa' import fs from 'fs-extra' import getStream from 'get-stream' import rimraf from 'rimraf' import tmp from 'tmp' -import { fromEvent, pFromCallback } from 'promise-toolbox' import { getHandler } from '@xen-orchestra/fs' +import { pFromCallback } from 'promise-toolbox' +import { pipeline } from 'readable-stream' import { randomBytes } from 'crypto' import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './' import { SECTOR_SIZE } from './src/_constants' -const initialDir = process.cwd() +let tempDir = null jest.setTimeout(60000) beforeEach(async () => { - const dir = await pFromCallback(cb => tmp.dir(cb)) - process.chdir(dir) + tempDir = await pFromCallback(cb => tmp.dir(cb)) }) 
afterEach(async () => { - const tmpDir = process.cwd() - process.chdir(initialDir) - await pFromCallback(cb => rimraf(tmpDir, cb)) + await pFromCallback(cb => rimraf(tempDir, cb)) }) -async function createRandomFile(name, sizeMb) { - await execa('bash', [ - '-c', - `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`, - ]) +async function createRandomFile(name, sizeMB) { + const createRandomStream = asyncIteratorToStream(function*(size) { + while (size-- > 0) { + yield Buffer.from([Math.floor(Math.random() * 256)]) + } + }) + const input = createRandomStream(sizeMB * 1024 * 1024) + await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb)) } async function checkFile(vhdName) { @@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) { test('blocks can be moved', async () => { const initalSize = 4 - await createRandomFile('randomfile', initalSize) - await convertFromRawToVhd('randomfile', 'randomfile.vhd') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await handler.getSize('randomfile') - const newVhd = new Vhd(handler, 'randomfile.vhd') + const rawFileName = `${tempDir}/randomfile` + await createRandomFile(rawFileName, initalSize) + const vhdFileName = `${tempDir}/randomfile.vhd` + await convertFromRawToVhd(rawFileName, vhdFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler.getSize(rawFileName) + const newVhd = new Vhd(handler, vhdFileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() await newVhd._freeFirstBlockSpace(8000000) - await recoverRawContent('randomfile.vhd', 'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual( - await fs.readFile('randomfile') + const recoveredFileName = `${tempDir}/recovered` + await recoverRawContent(vhdFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual( + await fs.readFile(rawFileName) ) }) 
test('the BAT MSB is not used for sign', async () => { const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb)) - await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T']) - const handler = getHandler({ url: 'file://' + process.cwd() }) - const vhd = new Vhd(handler, 'empty.vhd') + const emptyFileName = `${tempDir}/empty.vhd` + await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T']) + const handler = getHandler({ url: 'file://' }) + const vhd = new Vhd(handler, emptyFileName) await vhd.readHeaderAndFooter() await vhd.readBlockAllocationTable() // we want the bit 31 to be on, to prove it's not been used for sign const hugeWritePositionSectors = Math.pow(2, 31) + 200 await vhd.writeData(hugeWritePositionSectors, randomBuffer) - await checkFile('empty.vhd') + await checkFile(emptyFileName) // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32 const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE await vhd._freeFirstBlockSpace(hugePositionBytes) @@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => { // we recover the data manually for speed reasons. 
// fs.write() with offset is way faster than qemu-img when there is a 1.5To // hole before the block of data - const recoveredFile = await fs.open('recovered', 'w') + const recoveredFileName = `${tempDir}/recovered` + const recoveredFile = await fs.open(recoveredFileName, 'w') try { - const vhd2 = new Vhd(handler, 'empty.vhd') + const vhd2 = new Vhd(handler, emptyFileName) await vhd2.readHeaderAndFooter() await vhd2.readBlockAllocationTable() for (let i = 0; i < vhd.header.maxTableEntries; i++) { @@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => { fs.close(recoveredFile) } const recovered = await getStream.buffer( - await fs.createReadStream('recovered', { + await fs.createReadStream(recoveredFileName, { start: hugePositionBytes, end: hugePositionBytes + randomBuffer.length - 1, }) @@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => { test('writeData on empty file', async () => { const mbOfRandom = 3 - await createRandomFile('randomfile', mbOfRandom) - await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M']) - const randomData = await fs.readFile('randomfile') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await handler.getSize('randomfile') - const newVhd = new Vhd(handler, 'empty.vhd') + const rawFileName = `${tempDir}/randomfile` + const emptyFileName = `${tempDir}/empty.vhd` + await createRandomFile(rawFileName, mbOfRandom) + await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M']) + const randomData = await fs.readFile(rawFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler.getSize(rawFileName) + const newVhd = new Vhd(handler, emptyFileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() await newVhd.writeData(0, randomData) - await recoverRawContent('empty.vhd', 'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual(randomData) + const 
recoveredFileName = `${tempDir}/recovered` + await recoverRawContent(emptyFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual(randomData) }) test('writeData in 2 non-overlaping operations', async () => { const mbOfRandom = 3 - await createRandomFile('randomfile', mbOfRandom) - await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M']) - const randomData = await fs.readFile('randomfile') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await handler.getSize('randomfile') - const newVhd = new Vhd(handler, 'empty.vhd') + const rawFileName = `${tempDir}/randomfile` + const emptyFileName = `${tempDir}/empty.vhd` + const recoveredFileName = `${tempDir}/recovered` + await createRandomFile(rawFileName, mbOfRandom) + await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M']) + const randomData = await fs.readFile(rawFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler.getSize(rawFileName) + const newVhd = new Vhd(handler, emptyFileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() const splitPointSectors = 2 @@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => { splitPointSectors, randomData.slice(splitPointSectors * 512) ) - await recoverRawContent('empty.vhd', 'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual(randomData) + await recoverRawContent(emptyFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual(randomData) }) test('writeData in 2 overlaping operations', async () => { const mbOfRandom = 3 - await createRandomFile('randomfile', mbOfRandom) - await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M']) - const randomData = await fs.readFile('randomfile') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await 
handler.getSize('randomfile') - const newVhd = new Vhd(handler, 'empty.vhd') + const rawFileName = `${tempDir}/randomfile` + const emptyFileName = `${tempDir}/empty.vhd` + const recoveredFileName = `${tempDir}/recovered` + await createRandomFile(rawFileName, mbOfRandom) + await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M']) + const randomData = await fs.readFile(rawFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler.getSize(rawFileName) + const newVhd = new Vhd(handler, emptyFileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() const endFirstWrite = 3 @@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => { startSecondWrite, randomData.slice(startSecondWrite * 512) ) - await recoverRawContent('empty.vhd', 'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual(randomData) + await recoverRawContent(emptyFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual(randomData) }) test('BAT can be extended and blocks moved', async () => { const initalSize = 4 - await createRandomFile('randomfile', initalSize) - await convertFromRawToVhd('randomfile', 'randomfile.vhd') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await handler.getSize('randomfile') - const newVhd = new Vhd(handler, 'randomfile.vhd') + const rawFileName = `${tempDir}/randomfile` + const recoveredFileName = `${tempDir}/recovered` + const vhdFileName = `${tempDir}/randomfile.vhd` + await createRandomFile(rawFileName, initalSize) + await convertFromRawToVhd(rawFileName, vhdFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler.getSize(rawFileName) + const newVhd = new Vhd(handler, vhdFileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() await newVhd.ensureBatSize(2000) - await recoverRawContent('randomfile.vhd', 
'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual( - await fs.readFile('randomfile') + await recoverRawContent(vhdFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual( + await fs.readFile(rawFileName) ) }) test('coalesce works with empty parent files', async () => { const mbOfRandom = 2 - await createRandomFile('randomfile', mbOfRandom) - await convertFromRawToVhd('randomfile', 'randomfile.vhd') + const rawFileName = `${tempDir}/randomfile` + const emptyFileName = `${tempDir}/empty.vhd` + const vhdFileName = `${tempDir}/randomfile.vhd` + const recoveredFileName = `${tempDir}/recovered` + await createRandomFile(rawFileName, mbOfRandom) + await convertFromRawToVhd(rawFileName, vhdFileName) await execa('qemu-img', [ 'create', '-fvpc', - 'empty.vhd', + emptyFileName, mbOfRandom + 1 + 'M', ]) - await checkFile('randomfile.vhd') - await checkFile('empty.vhd') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const originalSize = await handler._getSize('randomfile') - await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true) - await checkFile('randomfile.vhd') - await checkFile('empty.vhd') - await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd') - await recoverRawContent('empty.vhd', 'recovered', originalSize) - expect(await fs.readFile('recovered')).toEqual( - await fs.readFile('randomfile') + await checkFile(vhdFileName) + await checkFile(emptyFileName) + const handler = getHandler({ url: 'file://' }) + const originalSize = await handler._getSize(rawFileName) + await chainVhd(handler, emptyFileName, handler, vhdFileName, true) + await checkFile(vhdFileName) + await checkFile(emptyFileName) + await vhdMerge(handler, emptyFileName, handler, vhdFileName) + await recoverRawContent(emptyFileName, recoveredFileName, originalSize) + expect(await fs.readFile(recoveredFileName)).toEqual( + await fs.readFile(rawFileName) ) }) test('coalesce works in normal cases', async 
() => { const mbOfRandom = 5 - await createRandomFile('randomfile', mbOfRandom) - await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2)) + const randomFileName = `${tempDir}/randomfile` + const random2FileName = `${tempDir}/randomfile2` + const smallRandomFileName = `${tempDir}/small_randomfile` + const parentFileName = `${tempDir}/parent.vhd` + const child1FileName = `${tempDir}/child1.vhd` + const child2FileName = `${tempDir}/child2.vhd` + const recoveredFileName = `${tempDir}/recovered` + await createRandomFile(randomFileName, mbOfRandom) + await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2)) await execa('qemu-img', [ 'create', '-fvpc', - 'parent.vhd', + parentFileName, mbOfRandom + 1 + 'M', ]) - await convertFromRawToVhd('randomfile', 'child1.vhd') - const handler = getHandler({ url: 'file://' + process.cwd() }) - await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd']) - const vhd = new Vhd(handler, 'child2.vhd') + await convertFromRawToVhd(randomFileName, child1FileName) + const handler = getHandler({ url: 'file://' }) + await execa('vhd-util', [ + 'snapshot', + '-n', + child2FileName, + '-p', + child1FileName, + ]) + const vhd = new Vhd(handler, child2FileName) await vhd.readHeaderAndFooter() await vhd.readBlockAllocationTable() vhd.footer.creatorApplication = 'xoa' await vhd.writeFooter() - const originalSize = await handler._getSize('randomfile') - await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true) - await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd']) - await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true) - await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd']) - const smallRandom = await fs.readFile('small_randomfile') - const newVhd = new Vhd(handler, 'child2.vhd') + const originalSize = await handler._getSize(randomFileName) + await chainVhd(handler, parentFileName, handler, child1FileName, true) + await execa('vhd-util', ['check', '-t', '-n', 
child1FileName]) + await chainVhd(handler, child1FileName, handler, child2FileName, true) + await execa('vhd-util', ['check', '-t', '-n', child2FileName]) + const smallRandom = await fs.readFile(smallRandomFileName) + const newVhd = new Vhd(handler, child2FileName) await newVhd.readHeaderAndFooter() await newVhd.readBlockAllocationTable() await newVhd.writeData(5, smallRandom) - await checkFile('child2.vhd') - await checkFile('child1.vhd') - await checkFile('parent.vhd') - await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd') - await checkFile('parent.vhd') - await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true) - await checkFile('child2.vhd') - await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd') - await checkFile('parent.vhd') - await recoverRawContent( - 'parent.vhd', - 'recovered_from_coalescing', - originalSize - ) - await execa('cp', ['randomfile', 'randomfile2']) - const fd = await fs.open('randomfile2', 'r+') + await checkFile(child2FileName) + await checkFile(child1FileName) + await checkFile(parentFileName) + await vhdMerge(handler, parentFileName, handler, child1FileName) + await checkFile(parentFileName) + await chainVhd(handler, parentFileName, handler, child2FileName, true) + await checkFile(child2FileName) + await vhdMerge(handler, parentFileName, handler, child2FileName) + await checkFile(parentFileName) + await recoverRawContent(parentFileName, recoveredFileName, originalSize) + await execa('cp', [randomFileName, random2FileName]) + const fd = await fs.open(random2FileName, 'r+') try { await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE) } finally { await fs.close(fd) } - expect(await fs.readFile('recovered_from_coalescing')).toEqual( - await fs.readFile('randomfile2') + expect(await fs.readFile(recoveredFileName)).toEqual( + await fs.readFile(random2FileName) ) }) -test('createSyntheticStream passes vhd-util check', async () => { +test('createSyntheticStream passes vhd-util check', async () => { 
const initalSize = 4 - const expectedVhdSize = 4197888 - await createRandomFile('randomfile', initalSize) - await convertFromRawToVhd('randomfile', 'randomfile.vhd') - const handler = getHandler({ url: 'file://' + process.cwd() }) - const stream = await createSyntheticStream(handler, 'randomfile.vhd') - expect(stream.length).toEqual(expectedVhdSize) - await fromEvent( - stream.pipe(await fs.createWriteStream('recovered.vhd')), - 'finish' + const rawFileName = `${tempDir}/randomfile` + const vhdFileName = `${tempDir}/randomfile.vhd` + const recoveredVhdFileName = `${tempDir}/recovered.vhd` + await createRandomFile(rawFileName, initalSize) + await convertFromRawToVhd(rawFileName, vhdFileName) + await checkFile(vhdFileName) + const handler = getHandler({ url: 'file://' }) + const stream = await createSyntheticStream(handler, vhdFileName) + const expectedVhdSize = (await fs.stat(vhdFileName)).size + expect(stream.length).toEqual((await fs.stat(vhdFileName)).size) + await pFromCallback(cb => + pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb) ) - await checkFile('recovered.vhd') - const stats = await fs.stat('recovered.vhd') + await checkFile(recoveredVhdFileName) + const stats = await fs.stat(recoveredVhdFileName) expect(stats.size).toEqual(expectedVhdSize) - await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile']) + await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName]) }) diff --git a/packages/vhd-lib/package.json b/packages/vhd-lib/package.json index c49cd4ea4..43897b0c8 100644 --- a/packages/vhd-lib/package.json +++ b/packages/vhd-lib/package.json @@ -42,6 +42,7 @@ "fs-promise": "^2.0.0", "get-stream": "^4.0.0", "index-modules": "^0.3.0", + "readable-stream": "^3.0.6", "rimraf": "^2.6.2", "tmp": "^0.0.33" }, diff --git a/packages/vhd-lib/src/_checkFooter.js b/packages/vhd-lib/src/_checkFooter.js new file mode 100644 index 000000000..1d2803055 --- /dev/null +++ b/packages/vhd-lib/src/_checkFooter.js @@ -0,0 +1,20 @@ +import 
assert from 'assert' + +import { + DISK_TYPE_DIFFERENCING, + DISK_TYPE_DYNAMIC, + FILE_FORMAT_VERSION, + FOOTER_COOKIE, + FOOTER_SIZE, +} from './_constants' + +export default footer => { + assert.strictEqual(footer.cookie, FOOTER_COOKIE) + assert.strictEqual(footer.dataOffset, FOOTER_SIZE) + assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION) + assert(footer.originalSize <= footer.currentSize) + assert( + footer.diskType === DISK_TYPE_DIFFERENCING || + footer.diskType === DISK_TYPE_DYNAMIC + ) +} diff --git a/packages/vhd-lib/src/_checkHeader.js b/packages/vhd-lib/src/_checkHeader.js new file mode 100644 index 000000000..332dcd759 --- /dev/null +++ b/packages/vhd-lib/src/_checkHeader.js @@ -0,0 +1,14 @@ +import assert from 'assert' + +import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants' + +export default (header, footer) => { + assert.strictEqual(header.cookie, HEADER_COOKIE) + assert.strictEqual(header.dataOffset, undefined) + assert.strictEqual(header.headerVersion, HEADER_VERSION) + assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE))) + + if (footer !== undefined) { + assert(header.maxTableEntries >= footer.currentSize / header.blockSize) + } +} diff --git a/packages/vhd-lib/src/_getFirstAndLastBlocks.js b/packages/vhd-lib/src/_getFirstAndLastBlocks.js new file mode 100644 index 000000000..dadd29ee0 --- /dev/null +++ b/packages/vhd-lib/src/_getFirstAndLastBlocks.js @@ -0,0 +1,47 @@ +import assert from 'assert' + +import { BLOCK_UNUSED } from './_constants' + +// get the identifiers and first sectors of the first and last block +// in the file +export default bat => { + const n = bat.length + assert.notStrictEqual(n, 0) + assert.strictEqual(n % 4, 0) + + let i = 0 + let j = 0 + let first, firstSector, last, lastSector + + // get first allocated block for initialization + while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) { + i += 1 + j += 4 + + if (j === n) { + const error = new Error('no allocated 
block found') + error.noBlock = true + throw error + } + } + lastSector = firstSector + first = last = i + + while (j < n) { + const sector = bat.readUInt32BE(j) + if (sector !== BLOCK_UNUSED) { + if (sector < firstSector) { + first = i + firstSector = sector + } else if (sector > lastSector) { + last = i + lastSector = sector + } + } + + i += 1 + j += 4 + } + + return { first, firstSector, last, lastSector } +} diff --git a/packages/vhd-lib/src/_readChunk.js b/packages/vhd-lib/src/_readChunk.js new file mode 100644 index 000000000..0d049b797 --- /dev/null +++ b/packages/vhd-lib/src/_readChunk.js @@ -0,0 +1,50 @@ +export default async function readChunk(stream, n) { + if (n === 0) { + return Buffer.alloc(0) + } + return new Promise((resolve, reject) => { + const chunks = [] + let i = 0 + + function clean() { + stream.removeListener('readable', onReadable) + stream.removeListener('end', onEnd) + stream.removeListener('error', onError) + } + + function resolve2() { + clean() + resolve(Buffer.concat(chunks, i)) + } + + function onEnd() { + resolve2() + clean() + } + + function onError(error) { + reject(error) + clean() + } + + function onReadable() { + const chunk = stream.read(n - i) + if (chunk === null) { + return // wait for more data + } + i += chunk.length + chunks.push(chunk) + if (i >= n) { + resolve2() + } + } + + stream.on('end', onEnd) + stream.on('error', onError) + stream.on('readable', onReadable) + + if (stream.readable) { + onReadable() + } + }) +} diff --git a/packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js b/packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js new file mode 100644 index 000000000..1737fa599 --- /dev/null +++ b/packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js @@ -0,0 +1,93 @@ +/* eslint-env jest */ + +import asyncIteratorToStream from 'async-iterator-to-stream' +import execa from 'execa' +import fs from 'fs-extra' +import rimraf from 'rimraf' +import getStream from 'get-stream' +import tmp from 'tmp' 
+import { createReadStream, createWriteStream } from 'fs' +import { pFromCallback } from 'promise-toolbox' +import { pipeline } from 'readable-stream' + +import { createVhdStreamWithLength } from '.' +import { FOOTER_SIZE } from './_constants' + +let tempDir = null + +beforeEach(async () => { + tempDir = await pFromCallback(cb => tmp.dir(cb)) +}) + +afterEach(async () => { + await pFromCallback(cb => rimraf(tempDir, cb)) +}) + +async function convertFromRawToVhd(rawName, vhdName) { + await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName]) +} + +async function createRandomFile(name, size) { + const createRandomStream = asyncIteratorToStream(function*(size) { + while (size-- > 0) { + yield Buffer.from([Math.floor(Math.random() * 256)]) + } + }) + const input = await createRandomStream(size) + await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb)) +} + +test('createVhdStreamWithLength can extract length', async () => { + const initialSize = 4 * 1024 + const rawFileName = `${tempDir}/randomfile` + const vhdName = `${tempDir}/randomfile.vhd` + const outputVhdName = `${tempDir}/output.vhd` + await createRandomFile(rawFileName, initialSize) + await convertFromRawToVhd(rawFileName, vhdName) + const vhdSize = fs.statSync(vhdName).size + const result = await createVhdStreamWithLength( + await createReadStream(vhdName) + ) + expect(result.length).toEqual(vhdSize) + const outputFileStream = await createWriteStream(outputVhdName) + await pFromCallback(cb => pipeline(result, outputFileStream, cb)) + const outputSize = fs.statSync(outputVhdName).size + expect(outputSize).toEqual(vhdSize) +}) + +test('createVhdStreamWithLength can skip blank after last block and before footer', async () => { + const initialSize = 4 * 1024 + const rawFileName = `${tempDir}/randomfile` + const vhdName = `${tempDir}/randomfile.vhd` + const outputVhdName = `${tempDir}/output.vhd` + await createRandomFile(rawFileName, initialSize) + await 
convertFromRawToVhd(rawFileName, vhdName) + const vhdSize = fs.statSync(vhdName).size + // read file footer + const footer = await getStream.buffer( + createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE }) + ) + + // we'll override the footer + const endOfFile = await createWriteStream(vhdName, { + flags: 'r+', + start: vhdSize - FOOTER_SIZE, + }) + // write a blank over the previous footer + await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb)) + // write the footer after the new blank + await pFromCallback(cb => endOfFile.end(footer, cb)) + const longerSize = fs.statSync(vhdName).size + // check input file has been lengthened + expect(longerSize).toEqual(vhdSize + FOOTER_SIZE) + const result = await createVhdStreamWithLength( + await createReadStream(vhdName) + ) + expect(result.length).toEqual(vhdSize) + const outputFileStream = await createWriteStream(outputVhdName) + await pFromCallback(cb => pipeline(result, outputFileStream, cb)) + const outputSize = fs.statSync(outputVhdName).size + // check out file has been shortened again + expect(outputSize).toEqual(vhdSize) + await execa('qemu-img', ['compare', outputVhdName, vhdName]) +}) diff --git a/packages/vhd-lib/src/createVhdStreamWithLength.js b/packages/vhd-lib/src/createVhdStreamWithLength.js new file mode 100644 index 000000000..ac19de8e0 --- /dev/null +++ b/packages/vhd-lib/src/createVhdStreamWithLength.js @@ -0,0 +1,80 @@ +import assert from 'assert' +import { pipeline, Transform } from 'readable-stream' + +import checkFooter from './_checkFooter' +import checkHeader from './_checkHeader' +import noop from './_noop' +import getFirstAndLastBlocks from './_getFirstAndLastBlocks' +import readChunk from './_readChunk' +import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants' +import { fuFooter, fuHeader } from './_structs' + +class EndCutterStream extends Transform { + constructor(footerOffset, footerBuffer) { + super() + this._footerOffset = footerOffset + 
this._footerBuffer = footerBuffer + this._position = 0 + this._done = false + } + + _transform(data, encoding, callback) { + if (!this._done) { + if (this._position + data.length >= this._footerOffset) { + this._done = true + const difference = this._footerOffset - this._position + data = data.slice(0, difference) + this.push(data) + this.push(this._footerBuffer) + } else { + this.push(data) + } + this._position += data.length + } + callback() + } +} + +export default async function createVhdStreamWithLength(stream) { + const readBuffers = [] + let streamPosition = 0 + + async function readStream(length) { + const chunk = await readChunk(stream, length) + assert.strictEqual(chunk.length, length) + streamPosition += chunk.length + readBuffers.push(chunk) + return chunk + } + + const footerBuffer = await readStream(FOOTER_SIZE) + const footer = fuFooter.unpack(footerBuffer) + checkFooter(footer) + + const header = fuHeader.unpack(await readStream(HEADER_SIZE)) + checkHeader(header, footer) + + await readStream(header.tableOffset - streamPosition) + + const table = await readStream(header.maxTableEntries * 4) + + readBuffers.reverse() + for (const buf of readBuffers) { + stream.unshift(buf) + } + + const footerOffset = + getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE + + Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE + + header.blockSize + + // ignore any data after footerOffset and push footerBuffer + // + // this is necessary to ignore any blank space between the last block and the + // final footer which would invalidate the size we computed + const newStream = new EndCutterStream(footerOffset, footerBuffer) + pipeline(stream, newStream, noop) + + newStream.length = footerOffset + FOOTER_SIZE + return newStream +} diff --git a/packages/vhd-lib/src/index.js b/packages/vhd-lib/src/index.js index dcbe20080..3d033b241 100644 --- a/packages/vhd-lib/src/index.js +++ b/packages/vhd-lib/src/index.js @@ -11,3 +11,6 @@ export { } from 
'./createReadableSparseStream' export { default as createSyntheticStream } from './createSyntheticStream' export { default as mergeVhd } from './merge' +export { + default as createVhdStreamWithLength, +} from './createVhdStreamWithLength' diff --git a/packages/vhd-lib/src/vhd.js b/packages/vhd-lib/src/vhd.js index e0401e61c..6f67b6cf0 100644 --- a/packages/vhd-lib/src/vhd.js +++ b/packages/vhd-lib/src/vhd.js @@ -1,19 +1,16 @@ import assert from 'assert' import { fromEvent } from 'promise-toolbox' +import checkFooter from './_checkFooter' +import checkHeader from './_checkHeader' import constantStream from './_constant-stream' +import getFirstAndLastBlocks from './_getFirstAndLastBlocks' import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs' import { set as mapSetBit, test as mapTestBit } from './_bitmap' import { BLOCK_UNUSED, - DISK_TYPE_DIFFERENCING, - DISK_TYPE_DYNAMIC, - FILE_FORMAT_VERSION, - FOOTER_COOKIE, FOOTER_SIZE, - HEADER_COOKIE, HEADER_SIZE, - HEADER_VERSION, PARENT_LOCATOR_ENTRIES, PLATFORM_NONE, PLATFORM_W2KU, @@ -170,21 +167,10 @@ export default class Vhd { } const footer = (this.footer = fuFooter.unpack(bufFooter)) - assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie') - assert.strictEqual(footer.dataOffset, FOOTER_SIZE) - assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION) - assert(footer.originalSize <= footer.currentSize) - assert( - footer.diskType === DISK_TYPE_DIFFERENCING || - footer.diskType === DISK_TYPE_DYNAMIC - ) + checkFooter(footer) const header = (this.header = fuHeader.unpack(bufHeader)) - assert.strictEqual(header.cookie, HEADER_COOKIE) - assert.strictEqual(header.dataOffset, undefined) - assert.strictEqual(header.headerVersion, HEADER_VERSION) - assert(header.maxTableEntries >= footer.currentSize / header.blockSize) - assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE))) + checkHeader(header, footer) // Compute the number of sectors in one block. 
// Default: One block contains 4096 sectors of 512 bytes. @@ -242,49 +228,6 @@ export default class Vhd { ) } - // get the identifiers and first sectors of the first and last block - // in the file - // - _getFirstAndLastBlocks() { - const n = this.header.maxTableEntries - const bat = this.blockTable - let i = 0 - let j = 0 - let first, firstSector, last, lastSector - - // get first allocated block for initialization - while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) { - i += 1 - j += 4 - - if (i === n) { - const error = new Error('no allocated block found') - error.noBlock = true - throw error - } - } - lastSector = firstSector - first = last = i - - while (i < n) { - const sector = bat.readUInt32BE(j) - if (sector !== BLOCK_UNUSED) { - if (sector < firstSector) { - first = i - firstSector = sector - } else if (sector > lastSector) { - last = i - lastSector = sector - } - } - - i += 1 - j += 4 - } - - return { first, firstSector, last, lastSector } - } - // ================================================================= // Write functions. 
// ================================================================= @@ -311,7 +254,9 @@ export default class Vhd { async _freeFirstBlockSpace(spaceNeededBytes) { try { - const { first, firstSector, lastSector } = this._getFirstAndLastBlocks() + const { first, firstSector, lastSector } = getFirstAndLastBlocks( + this.blockTable + ) const tableOffset = this.header.tableOffset const { batSize } = this const newMinSector = Math.ceil( diff --git a/packages/vhd-lib/vhd.integ.spec.js b/packages/vhd-lib/vhd.integ.spec.js index c4837441b..ca976aebb 100644 --- a/packages/vhd-lib/vhd.integ.spec.js +++ b/packages/vhd-lib/vhd.integ.spec.js @@ -4,22 +4,20 @@ import rimraf from 'rimraf' import tmp from 'tmp' import { createWriteStream, readFile } from 'fs-promise' import { fromEvent, pFromCallback } from 'promise-toolbox' +import { pipeline } from 'readable-stream' import { createReadableRawStream, createReadableSparseStream } from './' import { createFooter } from './src/_createFooterHeader' -const initialDir = process.cwd() +let tempDir = null beforeEach(async () => { - const dir = await pFromCallback(cb => tmp.dir(cb)) - process.chdir(dir) + tempDir = await pFromCallback(cb => tmp.dir(cb)) }) afterEach(async () => { - const tmpDir = process.cwd() - process.chdir(initialDir) - await pFromCallback(cb => rimraf(tmpDir, cb)) + await pFromCallback(cb => rimraf(tempDir, cb)) }) test('createFooter() does not crash', () => { @@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => { } const fileSize = 1000 const stream = createReadableRawStream(fileSize, mockParser) - const pipe = stream.pipe(createWriteStream('output.vhd')) - await fromEvent(pipe, 'finish') - await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd']) + await pFromCallback(cb => + pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb) + ) + await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`]) }) test('ReadableRawVHDStream detects when blocks are out of 
order', async () => { @@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => { new Promise((resolve, reject) => { const stream = createReadableRawStream(100000, mockParser) stream.on('error', reject) - const pipe = stream.pipe(createWriteStream('outputStream')) - pipe.on('finish', resolve) - pipe.on('error', reject) + pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err => + err ? reject(err) : resolve() + ) }) ).rejects.toThrow('Received out of order blocks') }) @@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => { blocks ) expect(stream.length).toEqual(4197888) - const pipe = stream.pipe(createWriteStream('output.vhd')) + const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`)) await fromEvent(pipe, 'finish') - await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd']) + await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`]) await execa('qemu-img', [ 'convert', '-f', 'vpc', '-O', 'raw', - 'output.vhd', - 'out1.raw', + `${tempDir}/output.vhd`, + `${tempDir}/out1.raw`, ]) - const out1 = await readFile('out1.raw') + const out1 = await readFile(`${tempDir}/out1.raw`) const expected = Buffer.alloc(fileSize) blocks.forEach(b => { b.data.copy(expected, b.offsetBytes) diff --git a/packages/xo-server/config.toml b/packages/xo-server/config.toml index baa810cbf..90278752a 100644 --- a/packages/xo-server/config.toml +++ b/packages/xo-server/config.toml @@ -9,6 +9,18 @@ datadir = '/var/lib/xo-server/data' # Necessary for external authentication providers. createUserOnFirstSignin = true +# XAPI does not support chunked encoding in HTTP requests, which is necessary +# when the content length is not known, which is the case for many backup-related +# operations in XO. +# +# It's possible to work around this for VHDs because it's possible to guess +# their size just by looking at the beginning of the stream.
+# +# But it is a guess, not a certainty, it depends on how the VHDs are formatted +# by XenServer, therefore it's disabled for the moment but can be enabled +# specifically for a user if necessary. +guessVhdSizeOnImport = false + # Whether API logs should contains the full request/response on # errors. # diff --git a/packages/xo-server/src/xapi/index.js b/packages/xo-server/src/xapi/index.js index 78f2d7e86..5c181784e 100644 --- a/packages/xo-server/src/xapi/index.js +++ b/packages/xo-server/src/xapi/index.js @@ -68,6 +68,7 @@ import { parseDateTime, prepareXapiParam, } from './utils' +import { createVhdStreamWithLength } from 'vhd-lib' const log = createLogger('xo:xapi') @@ -93,8 +94,10 @@ export const IPV6_CONFIG_MODES = ['None', 'DHCP', 'Static', 'Autoconf'] @mixin(mapToArray(mixins)) export default class Xapi extends XapiBase { - constructor(...args) { - super(...args) + constructor({ guessVhdSizeOnImport, ...opts }) { + super(opts) + + this._guessVhdSizeOnImport = guessVhdSizeOnImport // Patch getObject to resolve _xapiId property. this.getObject = (getObject => (...args) => { @@ -2095,11 +2098,16 @@ export default class Xapi extends XapiBase { // ----------------------------------------------------------------- async _importVdiContent(vdi, body, format = VDI_FORMAT_VHD) { - if (__DEV__ && body.length == null) { - throw new Error( - 'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.' - ) + if (typeof body.pipe === 'function' && body.length === undefined) { + if (this._guessVhdSizeOnImport && format === VDI_FORMAT_VHD) { + body = await createVhdStreamWithLength(body) + } else if (__DEV__) { + throw new Error( + 'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.' 
+ ) + } } + await Promise.all([ body.task, body.checksumVerified, diff --git a/packages/xo-server/src/xo-mixins/xen-servers.js b/packages/xo-server/src/xo-mixins/xen-servers.js index 2b9e96d00..082383a5b 100644 --- a/packages/xo-server/src/xo-mixins/xen-servers.js +++ b/packages/xo-server/src/xo-mixins/xen-servers.js @@ -40,7 +40,7 @@ const log = createLogger('xo:xo-mixins:xen-servers') // - _xapis[server.id] id defined // - _serverIdsByPool[xapi.pool.$id] is server.id export default class { - constructor(xo, { xapiOptions }) { + constructor(xo, { guessVhdSizeOnImport, xapiOptions }) { this._objectConflicts = { __proto__: null } // TODO: clean when a server is disconnected. const serversDb = (this._servers = new Servers({ connection: xo._redis, @@ -49,7 +49,10 @@ export default class { })) this._serverIdsByPool = { __proto__: null } this._stats = new XapiStats() - this._xapiOptions = xapiOptions + this._xapiOptions = { + guessVhdSizeOnImport, + ...xapiOptions, + } this._xapis = { __proto__: null } this._xo = xo