feat(vhd-lib,xo-server): guess VHD size on import (#3726)

parent ecfed30e6e
commit 2af1207702
@@ -6,6 +6,7 @@
 - [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
 - [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
 - [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
+- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))

 ### Bug fixes

@@ -17,6 +18,7 @@

 ### Released packages

+- vhd-lib v0.6.0
 - @xen-orchestra/fs v0.8.0
 - xo-server v5.38.0
 - xo-web v5.38.0
@@ -1,38 +1,40 @@
 /* eslint-env jest */

+import asyncIteratorToStream from 'async-iterator-to-stream'
 import execa from 'execa'
 import fs from 'fs-extra'
 import getStream from 'get-stream'
 import rimraf from 'rimraf'
 import tmp from 'tmp'
-import { fromEvent, pFromCallback } from 'promise-toolbox'
 import { getHandler } from '@xen-orchestra/fs'
+import { pFromCallback } from 'promise-toolbox'
+import { pipeline } from 'readable-stream'
 import { randomBytes } from 'crypto'

 import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'

 import { SECTOR_SIZE } from './src/_constants'

-const initialDir = process.cwd()
+let tempDir = null

 jest.setTimeout(60000)

 beforeEach(async () => {
-  const dir = await pFromCallback(cb => tmp.dir(cb))
-  process.chdir(dir)
+  tempDir = await pFromCallback(cb => tmp.dir(cb))
 })

 afterEach(async () => {
-  const tmpDir = process.cwd()
-  process.chdir(initialDir)
-  await pFromCallback(cb => rimraf(tmpDir, cb))
+  await pFromCallback(cb => rimraf(tempDir, cb))
 })

-async function createRandomFile(name, sizeMb) {
-  await execa('bash', [
-    '-c',
-    `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
-  ])
+async function createRandomFile(name, sizeMB) {
+  const createRandomStream = asyncIteratorToStream(function*(size) {
+    while (size-- > 0) {
+      yield Buffer.from([Math.floor(Math.random() * 256)])
+    }
+  })
+  const input = createRandomStream(sizeMB * 1024 * 1024)
+  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
 }

 async function checkFile(vhdName) {
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {

 test('blocks can be moved', async () => {
   const initalSize = 4
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  await createRandomFile(rawFileName, initalSize)
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, vhdFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd._freeFirstBlockSpace(8000000)
-  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  const recoveredFileName = `${tempDir}/recovered`
+  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('the BAT MSB is not used for sign', async () => {
   const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const vhd = new Vhd(handler, 'empty.vhd')
+  const emptyFileName = `${tempDir}/empty.vhd`
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
+  const handler = getHandler({ url: 'file://' })
+  const vhd = new Vhd(handler, emptyFileName)
   await vhd.readHeaderAndFooter()
   await vhd.readBlockAllocationTable()
   // we want the bit 31 to be on, to prove it's not been used for sign
   const hugeWritePositionSectors = Math.pow(2, 31) + 200
   await vhd.writeData(hugeWritePositionSectors, randomBuffer)
-  await checkFile('empty.vhd')
+  await checkFile(emptyFileName)
   // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
   const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
   await vhd._freeFirstBlockSpace(hugePositionBytes)
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
   // we recover the data manually for speed reasons.
   // fs.write() with offset is way faster than qemu-img when there is a 1.5To
   // hole before the block of data
-  const recoveredFile = await fs.open('recovered', 'w')
+  const recoveredFileName = `${tempDir}/recovered`
+  const recoveredFile = await fs.open(recoveredFileName, 'w')
   try {
-    const vhd2 = new Vhd(handler, 'empty.vhd')
+    const vhd2 = new Vhd(handler, emptyFileName)
     await vhd2.readHeaderAndFooter()
     await vhd2.readBlockAllocationTable()
     for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
     fs.close(recoveredFile)
   }
   const recovered = await getStream.buffer(
-    await fs.createReadStream('recovered', {
+    await fs.createReadStream(recoveredFileName, {
       start: hugePositionBytes,
       end: hugePositionBytes + randomBuffer.length - 1,
     })
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {

 test('writeData on empty file', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.writeData(0, randomData)
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  const recoveredFileName = `${tempDir}/recovered`
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('writeData in 2 non-overlaping operations', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   const splitPointSectors = 2
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
     splitPointSectors,
     randomData.slice(splitPointSectors * 512)
   )
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('writeData in 2 overlaping operations', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   const endFirstWrite = 3
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
     startSecondWrite,
     randomData.slice(startSecondWrite * 512)
   )
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('BAT can be extended and blocks moved', async () => {
   const initalSize = 4
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const recoveredFileName = `${tempDir}/recovered`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  await createRandomFile(rawFileName, initalSize)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, vhdFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.ensureBatSize(2000)
-  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('coalesce works with empty parent files', async () => {
   const mbOfRandom = 2
-  await createRandomFile('randomfile', mbOfRandom)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
   await execa('qemu-img', [
     'create',
     '-fvpc',
-    'empty.vhd',
+    emptyFileName,
     mbOfRandom + 1 + 'M',
   ])
-  await checkFile('randomfile.vhd')
-  await checkFile('empty.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler._getSize('randomfile')
-  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
-  await checkFile('randomfile.vhd')
-  await checkFile('empty.vhd')
-  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  await checkFile(vhdFileName)
+  await checkFile(emptyFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler._getSize(rawFileName)
+  await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
+  await checkFile(vhdFileName)
+  await checkFile(emptyFileName)
+  await vhdMerge(handler, emptyFileName, handler, vhdFileName)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('coalesce works in normal cases', async () => {
   const mbOfRandom = 5
-  await createRandomFile('randomfile', mbOfRandom)
-  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
+  const randomFileName = `${tempDir}/randomfile`
+  const random2FileName = `${tempDir}/randomfile2`
+  const smallRandomFileName = `${tempDir}/small_randomfile`
+  const parentFileName = `${tempDir}/parent.vhd`
+  const child1FileName = `${tempDir}/child1.vhd`
+  const child2FileName = `${tempDir}/child2.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(randomFileName, mbOfRandom)
+  await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
   await execa('qemu-img', [
     'create',
     '-fvpc',
-    'parent.vhd',
+    parentFileName,
     mbOfRandom + 1 + 'M',
   ])
-  await convertFromRawToVhd('randomfile', 'child1.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
-  const vhd = new Vhd(handler, 'child2.vhd')
+  await convertFromRawToVhd(randomFileName, child1FileName)
+  const handler = getHandler({ url: 'file://' })
+  await execa('vhd-util', [
+    'snapshot',
+    '-n',
+    child2FileName,
+    '-p',
+    child1FileName,
+  ])
+  const vhd = new Vhd(handler, child2FileName)
   await vhd.readHeaderAndFooter()
   await vhd.readBlockAllocationTable()
   vhd.footer.creatorApplication = 'xoa'
   await vhd.writeFooter()

-  const originalSize = await handler._getSize('randomfile')
-  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
-  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
-  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
-  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
-  const smallRandom = await fs.readFile('small_randomfile')
-  const newVhd = new Vhd(handler, 'child2.vhd')
+  const originalSize = await handler._getSize(randomFileName)
+  await chainVhd(handler, parentFileName, handler, child1FileName, true)
+  await execa('vhd-util', ['check', '-t', '-n', child1FileName])
+  await chainVhd(handler, child1FileName, handler, child2FileName, true)
+  await execa('vhd-util', ['check', '-t', '-n', child2FileName])
+  const smallRandom = await fs.readFile(smallRandomFileName)
+  const newVhd = new Vhd(handler, child2FileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.writeData(5, smallRandom)
-  await checkFile('child2.vhd')
-  await checkFile('child1.vhd')
-  await checkFile('parent.vhd')
-  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
-  await checkFile('parent.vhd')
-  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
-  await checkFile('child2.vhd')
-  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
-  await checkFile('parent.vhd')
-  await recoverRawContent(
-    'parent.vhd',
-    'recovered_from_coalescing',
-    originalSize
-  )
-  await execa('cp', ['randomfile', 'randomfile2'])
-  const fd = await fs.open('randomfile2', 'r+')
+  await checkFile(child2FileName)
+  await checkFile(child1FileName)
+  await checkFile(parentFileName)
+  await vhdMerge(handler, parentFileName, handler, child1FileName)
+  await checkFile(parentFileName)
+  await chainVhd(handler, parentFileName, handler, child2FileName, true)
+  await checkFile(child2FileName)
+  await vhdMerge(handler, parentFileName, handler, child2FileName)
+  await checkFile(parentFileName)
+  await recoverRawContent(parentFileName, recoveredFileName, originalSize)
+  await execa('cp', [randomFileName, random2FileName])
+  const fd = await fs.open(random2FileName, 'r+')
   try {
     await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
   } finally {
     await fs.close(fd)
   }
-  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
-    await fs.readFile('randomfile2')
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(random2FileName)
   )
 })

-test('createSyntheticStream passes vhd-util check', async () => {
+test.only('createSyntheticStream passes vhd-util check', async () => {
   const initalSize = 4
-  const expectedVhdSize = 4197888
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const stream = await createSyntheticStream(handler, 'randomfile.vhd')
-  expect(stream.length).toEqual(expectedVhdSize)
-  await fromEvent(
-    stream.pipe(await fs.createWriteStream('recovered.vhd')),
-    'finish'
+  const rawFileName = `${tempDir}/randomfile`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  const recoveredVhdFileName = `${tempDir}/recovered.vhd`
+  await createRandomFile(rawFileName, initalSize)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  await checkFile(vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const stream = await createSyntheticStream(handler, vhdFileName)
+  const expectedVhdSize = (await fs.stat(vhdFileName)).size
+  expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
+  await pFromCallback(cb =>
+    pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
   )
-  await checkFile('recovered.vhd')
-  const stats = await fs.stat('recovered.vhd')
+  await checkFile(recoveredVhdFileName)
+  const stats = await fs.stat(recoveredVhdFileName)
   expect(stats.size).toEqual(expectedVhdSize)
-  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
+  await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
 })
@@ -42,6 +42,7 @@
     "fs-promise": "^2.0.0",
     "get-stream": "^4.0.0",
     "index-modules": "^0.3.0",
+    "readable-stream": "^3.0.6",
     "rimraf": "^2.6.2",
     "tmp": "^0.0.33"
   },
packages/vhd-lib/src/_checkFooter.js (new file)
@@ -0,0 +1,20 @@
+import assert from 'assert'
+
+import {
+  DISK_TYPE_DIFFERENCING,
+  DISK_TYPE_DYNAMIC,
+  FILE_FORMAT_VERSION,
+  FOOTER_COOKIE,
+  FOOTER_SIZE,
+} from './_constants'
+
+export default footer => {
+  assert.strictEqual(footer.cookie, FOOTER_COOKIE)
+  assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
+  assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
+  assert(footer.originalSize <= footer.currentSize)
+  assert(
+    footer.diskType === DISK_TYPE_DIFFERENCING ||
+      footer.diskType === DISK_TYPE_DYNAMIC
+  )
+}
packages/vhd-lib/src/_checkHeader.js (new file)
@@ -0,0 +1,14 @@
+import assert from 'assert'
+
+import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'
+
+export default (header, footer) => {
+  assert.strictEqual(header.cookie, HEADER_COOKIE)
+  assert.strictEqual(header.dataOffset, undefined)
+  assert.strictEqual(header.headerVersion, HEADER_VERSION)
+  assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
+
+  if (footer !== undefined) {
+    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
+  }
+}
packages/vhd-lib/src/_getFirstAndLastBlocks.js (new file)
@@ -0,0 +1,47 @@
+import assert from 'assert'
+
+import { BLOCK_UNUSED } from './_constants'
+
+// get the identifiers and first sectors of the first and last block
+// in the file
+export default bat => {
+  const n = bat.length
+  assert.notStrictEqual(n, 0)
+  assert.strictEqual(n % 4, 0)
+
+  let i = 0
+  let j = 0
+  let first, firstSector, last, lastSector
+
+  // get first allocated block for initialization
+  while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
+    i += 1
+    j += 4
+
+    if (j === n) {
+      const error = new Error('no allocated block found')
+      error.noBlock = true
+      throw error
+    }
+  }
+  lastSector = firstSector
+  first = last = i
+
+  while (j < n) {
+    const sector = bat.readUInt32BE(j)
+    if (sector !== BLOCK_UNUSED) {
+      if (sector < firstSector) {
+        first = i
+        firstSector = sector
+      } else if (sector > lastSector) {
+        last = i
+        lastSector = sector
+      }
+    }
+
+    i += 1
+    j += 4
+  }
+
+  return { first, firstSector, last, lastSector }
+}
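The scan above is easier to follow on concrete data. A minimal sketch, not part of the commit: the entry values are made up, and it assumes `BLOCK_UNUSED` is the VHD "unused BAT entry" marker `0xffffffff`.

```js
// Hypothetical 4-entry BAT: each entry is a 32-bit big-endian sector number,
// with BLOCK_UNUSED marking unallocated blocks.
const bat = Buffer.alloc(16)
bat.writeUInt32BE(0xffffffff, 0) // entry 0: unused
bat.writeUInt32BE(600, 4) // entry 1: block 1 starts at sector 600
bat.writeUInt32BE(300, 8) // entry 2: block 2 starts at sector 300
bat.writeUInt32BE(0xffffffff, 12) // entry 3: unused

// getFirstAndLastBlocks(bat) scans every entry and returns
// { first: 2, firstSector: 300, last: 1, lastSector: 600 }:
// block 2 sits first in the file and block 1 sits last, regardless of
// their order in the BAT.
```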
packages/vhd-lib/src/_readChunk.js (new file)
@@ -0,0 +1,50 @@
+export default async function readChunk(stream, n) {
+  if (n === 0) {
+    return Buffer.alloc(0)
+  }
+  return new Promise((resolve, reject) => {
+    const chunks = []
+    let i = 0
+
+    function clean() {
+      stream.removeListener('readable', onReadable)
+      stream.removeListener('end', onEnd)
+      stream.removeListener('error', onError)
+    }
+
+    function resolve2() {
+      clean()
+      resolve(Buffer.concat(chunks, i))
+    }
+
+    function onEnd() {
+      resolve2()
+      clean()
+    }
+
+    function onError(error) {
+      reject(error)
+      clean()
+    }
+
+    function onReadable() {
+      const chunk = stream.read(n - i)
+      if (chunk === null) {
+        return // wait for more data
+      }
+      i += chunk.length
+      chunks.push(chunk)
+      if (i >= n) {
+        resolve2()
+      }
+    }
+
+    stream.on('end', onEnd)
+    stream.on('error', onError)
+    stream.on('readable', onReadable)
+
+    if (stream.readable) {
+      onReadable()
+    }
+  })
+}
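readChunk resolves with up to `n` bytes pulled from a paused stream and removes its listeners afterwards, so the rest of the stream stays buffered for later consumers. A usage sketch, not part of the commit (the file path and the 512-byte footer size are illustrative):

```js
import { createReadStream } from 'fs'
import readChunk from './_readChunk'

// Read exactly the first 512 bytes (a VHD footer) without consuming
// more of the stream than needed.
async function peekFooter(path) {
  const stream = createReadStream(path)
  const footer = await readChunk(stream, 512)
  // if the stream ends early, footer may be shorter than requested
  return footer
}
```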
packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js (new file)
@@ -0,0 +1,93 @@
+/* eslint-env jest */
+
+import asyncIteratorToStream from 'async-iterator-to-stream'
+import execa from 'execa'
+import fs from 'fs-extra'
+import rimraf from 'rimraf'
+import getStream from 'get-stream'
+import tmp from 'tmp'
+import { createReadStream, createWriteStream } from 'fs'
+import { pFromCallback } from 'promise-toolbox'
+import { pipeline } from 'readable-stream'
+
+import { createVhdStreamWithLength } from '.'
+import { FOOTER_SIZE } from './_constants'
+
+let tempDir = null
+
+beforeEach(async () => {
+  tempDir = await pFromCallback(cb => tmp.dir(cb))
+})
+
+afterEach(async () => {
+  await pFromCallback(cb => rimraf(tempDir, cb))
+})
+
+async function convertFromRawToVhd(rawName, vhdName) {
+  await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
+}
+
+async function createRandomFile(name, size) {
+  const createRandomStream = asyncIteratorToStream(function*(size) {
+    while (size-- > 0) {
+      yield Buffer.from([Math.floor(Math.random() * 256)])
+    }
+  })
+  const input = await createRandomStream(size)
+  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
+}
+
+test('createVhdStreamWithLength can extract length', async () => {
+  const initialSize = 4 * 1024
+  const rawFileName = `${tempDir}/randomfile`
+  const vhdName = `${tempDir}/randomfile.vhd`
+  const outputVhdName = `${tempDir}/output.vhd`
+  await createRandomFile(rawFileName, initialSize)
+  await convertFromRawToVhd(rawFileName, vhdName)
+  const vhdSize = fs.statSync(vhdName).size
+  const result = await createVhdStreamWithLength(
+    await createReadStream(vhdName)
+  )
+  expect(result.length).toEqual(vhdSize)
+  const outputFileStream = await createWriteStream(outputVhdName)
+  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
+  const outputSize = fs.statSync(outputVhdName).size
+  expect(outputSize).toEqual(vhdSize)
+})
+
+test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
+  const initialSize = 4 * 1024
+  const rawFileName = `${tempDir}/randomfile`
+  const vhdName = `${tempDir}/randomfile.vhd`
+  const outputVhdName = `${tempDir}/output.vhd`
+  await createRandomFile(rawFileName, initialSize)
+  await convertFromRawToVhd(rawFileName, vhdName)
+  const vhdSize = fs.statSync(vhdName).size
+  // read file footer
+  const footer = await getStream.buffer(
+    createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
+  )
+
+  // we'll override the footer
+  const endOfFile = await createWriteStream(vhdName, {
+    flags: 'r+',
+    start: vhdSize - FOOTER_SIZE,
+  })
+  // write a blank over the previous footer
+  await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
+  // write the footer after the new blank
+  await pFromCallback(cb => endOfFile.end(footer, cb))
+  const longerSize = fs.statSync(vhdName).size
+  // check input file has been lengthened
+  expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
+  const result = await createVhdStreamWithLength(
+    await createReadStream(vhdName)
+  )
+  expect(result.length).toEqual(vhdSize)
+  const outputFileStream = await createWriteStream(outputVhdName)
+  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
+  const outputSize = fs.statSync(outputVhdName).size
+  // check out file has been shortened again
+  expect(outputSize).toEqual(vhdSize)
+  await execa('qemu-img', ['compare', outputVhdName, vhdName])
+})
packages/vhd-lib/src/createVhdStreamWithLength.js (new file)
@@ -0,0 +1,80 @@
+import assert from 'assert'
+import { pipeline, Transform } from 'readable-stream'
+
+import checkFooter from './_checkFooter'
+import checkHeader from './_checkHeader'
+import noop from './_noop'
+import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
+import readChunk from './_readChunk'
+import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
+import { fuFooter, fuHeader } from './_structs'
+
+class EndCutterStream extends Transform {
+  constructor(footerOffset, footerBuffer) {
+    super()
+    this._footerOffset = footerOffset
+    this._footerBuffer = footerBuffer
+    this._position = 0
+    this._done = false
+  }
+
+  _transform(data, encoding, callback) {
+    if (!this._done) {
+      if (this._position + data.length >= this._footerOffset) {
+        this._done = true
+        const difference = this._footerOffset - this._position
+        data = data.slice(0, difference)
+        this.push(data)
+        this.push(this._footerBuffer)
+      } else {
+        this.push(data)
+      }
+      this._position += data.length
+    }
+    callback()
+  }
+}
+
+export default async function createVhdStreamWithLength(stream) {
+  const readBuffers = []
+  let streamPosition = 0
+
+  async function readStream(length) {
+    const chunk = await readChunk(stream, length)
+    assert.strictEqual(chunk.length, length)
+    streamPosition += chunk.length
+    readBuffers.push(chunk)
+    return chunk
+  }
+
+  const footerBuffer = await readStream(FOOTER_SIZE)
+  const footer = fuFooter.unpack(footerBuffer)
+  checkFooter(footer)
+
+  const header = fuHeader.unpack(await readStream(HEADER_SIZE))
+  checkHeader(header, footer)
+
+  await readStream(header.tableOffset - streamPosition)
+
+  const table = await readStream(header.maxTableEntries * 4)
+
+  readBuffers.reverse()
+  for (const buf of readBuffers) {
+    stream.unshift(buf)
+  }
+
+  const footerOffset =
+    getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
+    Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
+    header.blockSize
+
+  // ignore any data after footerOffset and push footerBuffer
+  //
+  // this is necessary to ignore any blank space between the last block and the
+  // final footer which would invalidate the size we computed
+  const newStream = new EndCutterStream(footerOffset, footerBuffer)
+  pipeline(stream, newStream, noop)
+
+  newStream.length = footerOffset + FOOTER_SIZE
+  return newStream
+}
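The size arithmetic is worth spelling out: with the default 2 MiB blocks and 512-byte sectors, a block's sector bitmap is ceil(4096 / 8 / 512) * 512 = 512 bytes, so the data ends at lastSector * 512 + 512 + 2097152, and the stream length adds the final 512-byte footer on top. A usage sketch, not part of the commit (the file name and function are illustrative):

```js
import { createReadStream } from 'fs'
import { createVhdStreamWithLength } from 'vhd-lib'

async function importWithLength(path) {
  // wraps the raw stream; `stream.length` can then be advertised as a
  // Content-Length instead of falling back to chunked encoding
  const stream = await createVhdStreamWithLength(createReadStream(path))
  console.log('computed VHD size:', stream.length)
  return stream
}
```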
@@ -11,3 +11,6 @@ export {
 } from './createReadableSparseStream'
 export { default as createSyntheticStream } from './createSyntheticStream'
 export { default as mergeVhd } from './merge'
+export {
+  default as createVhdStreamWithLength,
+} from './createVhdStreamWithLength'
@@ -1,19 +1,16 @@
 import assert from 'assert'
 import { fromEvent } from 'promise-toolbox'

+import checkFooter from './_checkFooter'
+import checkHeader from './_checkHeader'
 import constantStream from './_constant-stream'
+import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
 import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
 import { set as mapSetBit, test as mapTestBit } from './_bitmap'
 import {
   BLOCK_UNUSED,
-  DISK_TYPE_DIFFERENCING,
-  DISK_TYPE_DYNAMIC,
-  FILE_FORMAT_VERSION,
-  FOOTER_COOKIE,
   FOOTER_SIZE,
-  HEADER_COOKIE,
   HEADER_SIZE,
-  HEADER_VERSION,
   PARENT_LOCATOR_ENTRIES,
   PLATFORM_NONE,
   PLATFORM_W2KU,
@@ -170,21 +167,10 @@ export default class Vhd {
     }

     const footer = (this.footer = fuFooter.unpack(bufFooter))
-    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
-    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
-    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
-    assert(footer.originalSize <= footer.currentSize)
-    assert(
-      footer.diskType === DISK_TYPE_DIFFERENCING ||
-        footer.diskType === DISK_TYPE_DYNAMIC
-    )
+    checkFooter(footer)

     const header = (this.header = fuHeader.unpack(bufHeader))
-    assert.strictEqual(header.cookie, HEADER_COOKIE)
-    assert.strictEqual(header.dataOffset, undefined)
-    assert.strictEqual(header.headerVersion, HEADER_VERSION)
-    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
-    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
+    checkHeader(header, footer)

     // Compute the number of sectors in one block.
     // Default: One block contains 4096 sectors of 512 bytes.
@@ -242,49 +228,6 @@ export default class Vhd {
     )
   }

-  // get the identifiers and first sectors of the first and last block
-  // in the file
-  //
-  _getFirstAndLastBlocks() {
-    const n = this.header.maxTableEntries
-    const bat = this.blockTable
-    let i = 0
-    let j = 0
-    let first, firstSector, last, lastSector
-
-    // get first allocated block for initialization
-    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
-      i += 1
-      j += 4
-
-      if (i === n) {
-        const error = new Error('no allocated block found')
-        error.noBlock = true
-        throw error
-      }
-    }
-    lastSector = firstSector
-    first = last = i
-
-    while (i < n) {
-      const sector = bat.readUInt32BE(j)
-      if (sector !== BLOCK_UNUSED) {
-        if (sector < firstSector) {
-          first = i
-          firstSector = sector
-        } else if (sector > lastSector) {
-          last = i
-          lastSector = sector
-        }
-      }
-
-      i += 1
-      j += 4
-    }
-
-    return { first, firstSector, last, lastSector }
-  }
-
   // =================================================================
   // Write functions.
   // =================================================================
@@ -311,7 +254,9 @@ export default class Vhd {

   async _freeFirstBlockSpace(spaceNeededBytes) {
     try {
-      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
+      const { first, firstSector, lastSector } = getFirstAndLastBlocks(
+        this.blockTable
+      )
       const tableOffset = this.header.tableOffset
       const { batSize } = this
       const newMinSector = Math.ceil(
@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
 import tmp from 'tmp'
 import { createWriteStream, readFile } from 'fs-promise'
 import { fromEvent, pFromCallback } from 'promise-toolbox'
+import { pipeline } from 'readable-stream'

 import { createReadableRawStream, createReadableSparseStream } from './'

 import { createFooter } from './src/_createFooterHeader'

-const initialDir = process.cwd()
+let tempDir = null

 beforeEach(async () => {
-  const dir = await pFromCallback(cb => tmp.dir(cb))
-  process.chdir(dir)
+  tempDir = await pFromCallback(cb => tmp.dir(cb))
 })

 afterEach(async () => {
-  const tmpDir = process.cwd()
-  process.chdir(initialDir)
-  await pFromCallback(cb => rimraf(tmpDir, cb))
+  await pFromCallback(cb => rimraf(tempDir, cb))
 })

 test('createFooter() does not crash', () => {
@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
   }
   const fileSize = 1000
   const stream = createReadableRawStream(fileSize, mockParser)
-  const pipe = stream.pipe(createWriteStream('output.vhd'))
-  await fromEvent(pipe, 'finish')
-  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
+  await pFromCallback(cb =>
+    pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
+  )
+  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
 })

 test('ReadableRawVHDStream detects when blocks are out of order', async () => {
@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
     new Promise((resolve, reject) => {
       const stream = createReadableRawStream(100000, mockParser)
       stream.on('error', reject)
-      const pipe = stream.pipe(createWriteStream('outputStream'))
-      pipe.on('finish', resolve)
-      pipe.on('error', reject)
+      pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
+        err ? reject(err) : resolve()
+      )
     })
   ).rejects.toThrow('Received out of order blocks')
 })
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
     blocks
   )
   expect(stream.length).toEqual(4197888)
-  const pipe = stream.pipe(createWriteStream('output.vhd'))
+  const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
   await fromEvent(pipe, 'finish')
-  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
+  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
   await execa('qemu-img', [
     'convert',
     '-f',
     'vpc',
     '-O',
     'raw',
-    'output.vhd',
-    'out1.raw',
+    `${tempDir}/output.vhd`,
+    `${tempDir}/out1.raw`,
   ])
-  const out1 = await readFile('out1.raw')
+  const out1 = await readFile(`${tempDir}/out1.raw`)
   const expected = Buffer.alloc(fileSize)
   blocks.forEach(b => {
     b.data.copy(expected, b.offsetBytes)
@@ -9,6 +9,18 @@ datadir = '/var/lib/xo-server/data'
 # Necessary for external authentication providers.
 createUserOnFirstSignin = true

+# XAPI does not support chunked encoding in HTTP requests, which is necessary
+# when the content length is not known, which is the case for many backup
+# related operations in XO.
+#
+# It's possible to work around this for VHDs because it's possible to guess
+# their size just by looking at the beginning of the stream.
+#
+# But it is a guess, not a certainty: it depends on how the VHDs are formatted
+# by XenServer, therefore it's disabled for the moment but can be enabled
+# specifically for a user if necessary.
+guessVhdSizeOnImport = false
+
 # Whether API logs should contains the full request/response on
 # errors.
 #
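To opt in, a user sets the flag in their own xo-server configuration rather than in this sample file; a sketch (the exact config location varies per installation):

```toml
# e.g. in /etc/xo-server/config.toml (path is an assumption, check your install)
guessVhdSizeOnImport = true
```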
@@ -68,6 +68,7 @@ import {
   parseDateTime,
   prepareXapiParam,
 } from './utils'
+import { createVhdStreamWithLength } from 'vhd-lib'

 const log = createLogger('xo:xapi')

@@ -93,8 +94,10 @@ export const IPV6_CONFIG_MODES = ['None', 'DHCP', 'Static', 'Autoconf']

 @mixin(mapToArray(mixins))
 export default class Xapi extends XapiBase {
-  constructor(...args) {
-    super(...args)
+  constructor({ guessVhdSizeOnImport, ...opts }) {
+    super(opts)
+
+    this._guessVhdSizeOnImport = guessVhdSizeOnImport

     // Patch getObject to resolve _xapiId property.
     this.getObject = (getObject => (...args) => {
@@ -2095,11 +2098,16 @@ export default class Xapi extends XapiBase {
   // -----------------------------------------------------------------

   async _importVdiContent(vdi, body, format = VDI_FORMAT_VHD) {
-    if (__DEV__ && body.length == null) {
-      throw new Error(
-        'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.'
-      )
+    if (typeof body.pipe === 'function' && body.length === undefined) {
+      if (this._guessVhdSizeOnImport && format === VDI_FORMAT_VHD) {
+        body = await createVhdStreamWithLength(body)
+      } else if (__DEV__) {
+        throw new Error(
+          'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.'
+        )
+      }
     }

     await Promise.all([
       body.task,
       body.checksumVerified,
@@ -40,7 +40,7 @@ const log = createLogger('xo:xo-mixins:xen-servers')
 // - _xapis[server.id] id defined
 // - _serverIdsByPool[xapi.pool.$id] is server.id
 export default class {
-  constructor(xo, { xapiOptions }) {
+  constructor(xo, { guessVhdSizeOnImport, xapiOptions }) {
     this._objectConflicts = { __proto__: null } // TODO: clean when a server is disconnected.
     const serversDb = (this._servers = new Servers({
       connection: xo._redis,
@@ -49,7 +49,10 @@ export default class {
     }))
     this._serverIdsByPool = { __proto__: null }
     this._stats = new XapiStats()
-    this._xapiOptions = xapiOptions
+    this._xapiOptions = {
+      guessVhdSizeOnImport,
+      ...xapiOptions,
+    }
     this._xapis = { __proto__: null }
     this._xo = xo
