chore(xo-server/vhd-merge): various updates (#2767)

Fixes #2746 

- implement parent locators
- tests
- remove `@nraynaud/struct-fu`
Nicolas Raynaud 2018-03-27 09:39:36 -07:00 committed by Julien Fontanet
parent 0b9d031965
commit 7e689076d8
10 changed files with 541 additions and 171 deletions


@@ -7,6 +7,11 @@ node_js:
 # Use containers.
 # http://docs.travis-ci.com/user/workers/container-based-infrastructure/
 sudo: false
+addons:
+  apt:
+    packages:
+      - qemu-utils
+      - blktap-utils
 before_install:
   - curl -o- -L https://yarnpkg.com/install.sh | bash
@@ -14,3 +19,7 @@ before_install:
 cache:
   yarn: true
+script:
+  - yarn run test
+  - yarn run test-integration


@@ -52,12 +52,13 @@
     "build": "scripts/run-script --parallel build",
     "clean": "scripts/run-script --parallel clean",
     "dev": "scripts/run-script --parallel dev",
-    "dev-test": "jest --bail --watch",
+    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
     "posttest": "scripts/run-script test",
     "precommit": "scripts/lint-staged",
     "prepare": "scripts/run-script prepare",
    "pretest": "eslint --ignore-path .gitignore .",
-    "test": "jest"
+    "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
+    "test-integration": "jest \".integ\\.spec\\.js$\""
   },
   "workspaces": [
     "@xen-orchestra/*",


@@ -26,7 +26,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@nraynaud/struct-fu": "^1.0.1",
+    "struct-fu": "^1.2.0",
     "@nraynaud/xo-fs": "^0.0.5",
     "babel-runtime": "^6.22.0",
     "exec-promise": "^0.7.0"


@@ -1,5 +1,5 @@
 import assert from 'assert'
-import fu from '@nraynaud/struct-fu'
+import fu from 'struct-fu'
 import { dirname } from 'path'
 // ===================================================================


@@ -40,7 +40,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepare": "yarn run build"
   },
   "babel": {
     "plugins": [


@@ -33,7 +33,6 @@
   "dependencies": {
     "@babel/polyfill": "7.0.0-beta.42",
     "@marsaud/smb2-promise": "^0.2.1",
-    "@nraynaud/struct-fu": "^1.0.1",
     "@xen-orchestra/cron": "^1.0.2",
     "ajv": "^6.1.1",
     "app-conf": "^0.5.0",
@@ -104,6 +103,7 @@
     "split-lines": "^1.1.0",
     "stack-chain": "^2.0.0",
     "stoppable": "^1.0.5",
+    "struct-fu": "^1.2.0",
     "tar-stream": "^1.5.5",
     "through2": "^2.0.3",
     "tmp": "^0.0.33",


@@ -0,0 +1,284 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import { randomBytes } from 'crypto'
import { fromEvent } from 'promise-toolbox'
import LocalHandler from './remote-handlers/local'
import vhdMerge, {
chainVhd,
createReadStream,
Vhd,
VHD_SECTOR_SIZE,
} from './vhd-merge'
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
const initialDir = process.cwd()
jest.setTimeout(10000)
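// Each test runs in its own temporary directory, removed in afterEach, so the
// VHD fixtures created below never collide between tests.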
beforeEach(async () => {
const dir = await tmpDir()
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
async function createRandomFile (name, sizeMb) {
await execa('bash', [
'-c',
`< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
])
}
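// vhd-util comes from blktap-utils (installed via .travis.yml); the -p, -b and
// -t flags presumably extend the check to parents, bitmaps and the BAT.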
async function checkFile (vhdName) {
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}
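// Convert the VHD back to raw with qemu-img, then truncate to the original
// size: the VHD virtual size is rounded up, so the tail must be trimmed before
// a byte-for-byte comparison.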
async function recoverRawContent (vhdName, rawName, originalSize) {
await checkFile(vhdName)
await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdName, rawName])
if (originalSize !== undefined) {
await execa('truncate', ['-s', originalSize, rawName])
}
}
async function convertFromRawToVhd (rawName, vhdName) {
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}
test('blocks can be moved', async () => {
const initialSize = 4
await createRandomFile('randomfile', initialSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
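// ask for ~8 MB of free space after the headers: the first data block sits
// right behind them, so it has to be relocated towards the end of the file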
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
)
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb =>
randomBytes(VHD_SECTOR_SIZE, cb)
)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
// we recover the data manually for speed reasons:
// fs.write() with an offset is way faster than qemu-img when there is a
// ~1.5 TB hole before the block of data
const recoveredFile = await fs.open('recovered', 'w')
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
await vhd2.readHeaderAndFooter()
await vhd2.readBlockTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
const entry = vhd._getBatEntry(i)
if (entry !== 0xffffffff) {
const block = (await vhd2._readBlock(i)).data
await fs.write(
recoveredFile,
block,
0,
block.length,
vhd2.header.blockSize * i
)
}
}
} finally {
await fs.close(recoveredFile)
}
const recovered = await streamToBuffer(
await fs.createReadStream('recovered', {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
})
)
expect(recovered).toEqual(randomBuffer)
})
test('writeData on empty file', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
})
test('writeData in 2 non-overlapping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
const splitPointSectors = 2
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
await newVhd.writeData(
splitPointSectors,
randomData.slice(splitPointSectors * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
})
test('writeData in 2 overlapping operations', async () => {
const mbOfRandom = 3
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
const endFirstWrite = 3
const startSecondWrite = 2
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
await newVhd.writeData(
startSecondWrite,
randomData.slice(startSecondWrite * 512)
)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
})
test('BAT can be extended and blocks moved', async () => {
const initialSize = 4
await createRandomFile('randomfile', initialSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
)
})
test('coalesce works with empty parent files', async () => {
const mbOfRandom = 2
await createRandomFile('randomfile', mbOfRandom)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
await execa('qemu-img', [
'create',
'-fvpc',
'empty.vhd',
mbOfRandom + 1 + 'M',
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
await fs.readFile('randomfile')
)
})
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
await createRandomFile('randomfile', mbOfRandom)
await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
await execa('qemu-img', [
'create',
'-fvpc',
'parent.vhd',
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
await checkFile('parent.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
await checkFile('parent.vhd')
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
await checkFile('child2.vhd')
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
await checkFile('parent.vhd')
await recoverRawContent(
'parent.vhd',
'recovered_from_coalescing',
originalSize
)
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
} finally {
await fs.close(fd)
}
expect(await fs.readFile('recovered_from_coalescing')).toEqual(
await fs.readFile('randomfile2')
)
})
test('createReadStream passes vhd-util check', async () => {
const initialSize = 4
await createRandomFile('randomfile', initialSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const stream = createReadStream(handler, 'randomfile.vhd')
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
)
await checkFile('recovered.vhd')
})


@@ -3,8 +3,7 @@
 import assert from 'assert'
 import asyncIteratorToStream from 'async-iterator-to-stream'
 import concurrency from 'limit-concurrency-decorator'
-import fu from '@nraynaud/struct-fu'
-import isEqual from 'lodash/isEqual'
+import fu from 'struct-fu'
 import { dirname, relative } from 'path'
 import { fromEvent } from 'promise-toolbox'
@@ -13,7 +12,7 @@ import constantStream from './constant-stream'
 import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
 const VHD_UTIL_DEBUG = 0
-const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-util]${str}`) : noop
+const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
 // ===================================================================
 //
@@ -28,7 +27,7 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-util]${str}`) : noop
 // Sizes in bytes.
 const VHD_FOOTER_SIZE = 512
 const VHD_HEADER_SIZE = 1024
-const VHD_SECTOR_SIZE = 512
+export const VHD_SECTOR_SIZE = 512
 // Block allocation table entry size. (Block addr)
 const VHD_ENTRY_SIZE = 4
@@ -40,6 +39,12 @@ const VHD_PLATFORM_CODE_NONE = 0
 export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
 export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
+export const PLATFORM_NONE = 0
+export const PLATFORM_W2RU = 0x57327275
+export const PLATFORM_W2KU = 0x57326b75
+export const PLATFORM_MAC = 0x4d616320
+export const PLATFORM_MACX = 0x4d616358
 // Other.
 const BLOCK_UNUSED = 0xffffffff
 const BIT_MASK = 0x80
@@ -50,28 +55,24 @@ BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
 // ===================================================================
+const SIZE_OF_32_BITS = Math.pow(2, 32)
+const uint64 = fu.derive(
+  fu.uint32(2),
+  number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
+  _ => _[0] * SIZE_OF_32_BITS + _[1]
+)
 const fuFooter = fu.struct([
   fu.char('cookie', 8), // 0
   fu.uint32('features'), // 8
   fu.uint32('fileFormatVersion'), // 12
-  fu.struct('dataOffset', [
-    fu.uint32('high'), // 16
-    fu.uint32('low'), // 20
-  ]),
+  uint64('dataOffset'), // offset of the header, should always be 512
   fu.uint32('timestamp'), // 24
   fu.char('creatorApplication', 4), // 28
   fu.uint32('creatorVersion'), // 32
   fu.uint32('creatorHostOs'), // 36
-  fu.struct('originalSize', [
-    // At the creation, current size of the hard disk.
-    fu.uint32('high'), // 40
-    fu.uint32('low'), // 44
-  ]),
-  fu.struct('currentSize', [
-    // Current size of the virtual disk. At the creation: currentSize = originalSize.
-    fu.uint32('high'), // 48
-    fu.uint32('low'), // 52
-  ]),
+  uint64('originalSize'),
+  uint64('currentSize'),
   fu.struct('diskGeometry', [
     fu.uint16('cylinders'), // 56
     fu.uint8('heads'), // 58
@@ -87,12 +88,8 @@ const fuFooter = fu.struct([
 const fuHeader = fu.struct([
   fu.char('cookie', 8),
-  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
-  fu.struct('tableOffset', [
-    // Absolute byte offset of the Block Allocation Table.
-    fu.uint32('high'),
-    fu.uint32('low'),
-  ]),
+  fu.uint8('dataOffsetUnused', 8),
+  uint64('tableOffset'),
   fu.uint32('headerVersion'),
   fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
   fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
@@ -108,11 +105,7 @@ const fuHeader = fu.struct([
       fu.uint32('platformDataSpace'),
       fu.uint32('platformDataLength'),
       fu.uint32('reserved'),
-      fu.struct('platformDataOffset', [
-        // Absolute byte offset of the locator data.
-        fu.uint32('high'),
-        fu.uint32('low'),
-      ]),
+      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
     ],
     VHD_PARENT_LOCATOR_ENTRIES
   ),
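The new `uint64` field packs a JavaScript number into two big-endian uint32 halves, which is what lets the structs above expose 64-bit VHD offsets as plain numbers (exact up to Number.MAX_SAFE_INTEGER). A minimal round-trip sketch, assuming struct-fu 1.2.0:

const offsets = fu.struct([uint64('tableOffset')])
const buf = offsets.pack({ tableOffset: 3 * SIZE_OF_32_BITS + 7 })
// buf now holds the big-endian halves [0, 0, 0, 3] and [0, 0, 0, 7]
offsets.unpack(buf).tableOffset === 3 * SIZE_OF_32_BITS + 7 // → true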
@@ -123,16 +116,14 @@ const fuHeader = fu.struct([
 // Helpers
 // ===================================================================
-const SIZE_OF_32_BITS = Math.pow(2, 32)
-const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
+const computeBatSize = entries =>
+  sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))
 // Returns a 32 bits integer corresponding to a Vhd version.
 const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
 // Sectors conversions.
-const sectorsRoundUp = bytes =>
-  Math.floor((bytes + VHD_SECTOR_SIZE - 1) / VHD_SECTOR_SIZE)
-const sectorsRoundUpNoZero = bytes => sectorsRoundUp(bytes) || 1
+const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
 const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
 // Check/Set a bit on a vhd map.
@@ -163,26 +154,39 @@ const unpackField = (field, buf) => {
 // Returns the checksum of a raw struct.
 // The raw struct (footer or header) is altered with the new sum.
-function checksumStruct (rawStruct, struct) {
+function checksumStruct (buf, struct) {
   const checksumField = struct.fields.checksum
   let sum = 0
-  // Reset current sum.
-  packField(checksumField, 0, rawStruct)
-  for (let i = 0, n = struct.size; i < n; i++) {
-    sum = (sum + rawStruct[i]) & 0xffffffff
-  }
-  sum = 0xffffffff - sum
+  // Do not use the stored checksum to compute the new checksum.
+  const checksumOffset = checksumField.offset
+  for (let i = 0, n = checksumOffset; i < n; ++i) {
+    sum += buf[i]
+  }
+  for (
+    let i = checksumOffset + checksumField.size, n = struct.size;
+    i < n;
+    ++i
+  ) {
+    sum += buf[i]
+  }
+  sum = ~sum >>> 0
   // Write new sum.
-  packField(checksumField, sum, rawStruct)
+  packField(checksumField, sum, buf)
   return sum
 }
+const assertChecksum = (name, buf, struct) => {
+  const actual = unpackField(struct.fields.checksum, buf)
+  const expected = checksumStruct(buf, struct)
+  if (actual !== expected) {
+    throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
+  }
+}
 // ===================================================================
 // Format:
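The VHD checksum is the ones' complement of the byte sum of the structure; skipping the checksum field while summing is equivalent to the old approach of zeroing it first, and `~sum >>> 0` equals the old `0xffffffff - sum` for any sum below 2**32. A tiny worked check:

const complement = sum => ~sum >>> 0
complement(0x1234).toString(16) // → 'ffffedcb', i.e. 0xffffffff - 0x1234
complement(0) // → 4294967295 (0xffffffff)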
@@ -207,6 +211,10 @@ function checksumStruct (rawStruct, struct) {
 // - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
 // - sectorSize = 512
 export class Vhd {
+  get batSize () {
+    return computeBatSize(this.header.maxTableEntries)
+  }
   constructor (handler, path) {
     this._handler = handler
     this._path = path
@@ -235,17 +243,10 @@ export class Vhd {
   getEndOfHeaders () {
     const { header } = this
-    let end = uint32ToUint64(this.footer.dataOffset) + VHD_HEADER_SIZE
-    const blockAllocationTableSize = sectorsToBytes(
-      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
-    )
+    let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
     // Max(end, block allocation table end)
-    end = Math.max(
-      end,
-      uint32ToUint64(header.tableOffset) + blockAllocationTableSize
-    )
+    end = Math.max(end, header.tableOffset + this.batSize)
     for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
       const entry = header.parentLocatorEntry[i]
@@ -253,8 +254,7 @@ export class Vhd {
       if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
         end = Math.max(
           end,
-          uint32ToUint64(entry.platformDataOffset) +
-            sectorsToBytes(entry.platformDataSpace)
+          entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
         )
       }
     }
@@ -286,21 +286,16 @@
   // Get the beginning (footer + header) of a vhd file.
   async readHeaderAndFooter () {
     const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
-    const sum = unpackField(fuFooter.fields.checksum, buf)
-    const sumToTest = checksumStruct(buf, fuFooter)
-    // Checksum child & parent.
-    if (sumToTest !== sum) {
-      throw new Error(
-        `Bad checksum in vhd. Expected: ${sum}. Given: ${sumToTest}. (data=${buf.toString(
-          'hex'
-        )})`
-      )
-    }
-    const header = (this.header = fuHeader.unpack(buf.slice(VHD_FOOTER_SIZE)))
-    this.footer = fuFooter.unpack(buf)
+    const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
+    const bufHeader = buf.slice(VHD_FOOTER_SIZE)
+    assertChecksum('footer', bufFooter, fuFooter)
+    assertChecksum('header', bufHeader, fuHeader)
+    const footer = (this.footer = fuFooter.unpack(bufFooter))
+    assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
+    const header = (this.header = fuHeader.unpack(bufHeader))
     // Compute the number of sectors in one block.
     // Default: One block contains 4096 sectors of 512 bytes.
@@ -330,13 +325,10 @@
   // Returns a buffer that contains the block allocation table of a vhd file.
   async readBlockTable () {
     const { header } = this
-    const offset = uint32ToUint64(header.tableOffset)
-    const size = sectorsToBytes(
-      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
-    )
-    this.blockTable = await this._read(offset, size)
+    this.blockTable = await this._read(
+      header.tableOffset,
+      header.maxTableEntries * VHD_ENTRY_SIZE
+    )
   }
   // return the first sector (bitmap) of a block
@@ -433,71 +425,70 @@
       : fromEvent(data.pipe(stream), 'finish')
   }
-  async ensureBatSize (size) {
-    const { header } = this
-    const prevMaxTableEntries = header.maxTableEntries
-    if (prevMaxTableEntries >= size) {
-      return
-    }
-    const tableOffset = uint32ToUint64(header.tableOffset)
-    // extend BAT
-    const maxTableEntries = (header.maxTableEntries = size)
-    const batSize = sectorsToBytes(
-      sectorsRoundUpNoZero(maxTableEntries * VHD_ENTRY_SIZE)
-    )
-    const prevBat = this.blockTable
-    const bat = (this.blockTable = Buffer.allocUnsafe(batSize))
-    prevBat.copy(bat)
-    bat.fill(BUF_BLOCK_UNUSED, prevBat.length)
-    debug(
-      `ensureBatSize: extend in memory BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
-    )
-    const extendBat = async () => {
-      debug(
-        `ensureBatSize: extend in file BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
-      )
-      return this._write(
-        constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
-        tableOffset + prevBat.length
-      )
-    }
+  async _freeFirstBlockSpace (spaceNeededBytes) {
     try {
       const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
-      if (tableOffset + batSize < sectorsToBytes(firstSector)) {
-        return Promise.all([extendBat(), this.writeHeader()])
+      const tableOffset = this.header.tableOffset
+      const { batSize } = this
+      const newMinSector = Math.ceil(
+        (tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
+      )
+      if (
+        tableOffset + batSize + spaceNeededBytes >=
+        sectorsToBytes(firstSector)
+      ) {
+        const { fullBlockSize } = this
+        const newFirstSector = Math.max(
+          lastSector + fullBlockSize / VHD_SECTOR_SIZE,
+          newMinSector
+        )
+        debug(
+          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
+        )
+        // copy the first block at the end
+        const stream = await this._readStream(
+          sectorsToBytes(firstSector),
+          fullBlockSize
+        )
+        await this._write(stream, sectorsToBytes(newFirstSector))
+        await this._setBatEntry(first, newFirstSector)
+        await this.writeFooter(true)
+        spaceNeededBytes -= this.fullBlockSize
+        if (spaceNeededBytes > 0) {
+          return this._freeFirstBlockSpace(spaceNeededBytes)
+        }
       }
-      const { fullBlockSize } = this
-      const newFirstSector = lastSector + fullBlockSize / VHD_SECTOR_SIZE
-      debug(
-        `ensureBatSize: move first block ${firstSector} -> ${newFirstSector}`
-      )
-      // copy the first block at the end
-      const stream = await this._readStream(
-        sectorsToBytes(firstSector),
-        fullBlockSize
-      )
-      await this._write(stream, sectorsToBytes(newFirstSector))
-      await extendBat()
-      await this._setBatEntry(first, newFirstSector)
-      await this.writeHeader()
-      await this.writeFooter()
     } catch (e) {
-      if (e.noBlock) {
-        await extendBat()
-        await this.writeHeader()
-        await this.writeFooter()
-      } else {
+      if (!e.noBlock) {
        throw e
      }
    }
  }
+  async ensureBatSize (entries) {
+    const { header } = this
+    const prevMaxTableEntries = header.maxTableEntries
+    if (prevMaxTableEntries >= entries) {
+      return
+    }
+    const newBatSize = computeBatSize(entries)
+    await this._freeFirstBlockSpace(newBatSize - this.batSize)
+    const maxTableEntries = (header.maxTableEntries = entries)
+    const prevBat = this.blockTable
+    const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
+    prevBat.copy(bat)
+    bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
+    debug(
+      `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
+    )
+    await this._write(
+      constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
+      header.tableOffset + prevBat.length
+    )
+    await this.writeHeader()
+  }
   // set the first sector (bitmap) of a block
   _setBatEntry (block, blockSector) {
     const i = block * VHD_ENTRY_SIZE
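_freeFirstBlockSpace moves whole blocks away from the front of the file until the gap between the end of the BAT and the first data block is at least spaceNeededBytes, one block per recursion step. A sketch of the per-step overlap test, with made-up numbers:

const VHD_SECTOR_SIZE = 512
const tableOffset = 1536 // footer (512) + header (1024)
const batSize = 512 // a one-sector BAT
const spaceNeededBytes = 512 // e.g. growing the BAT by one sector
const firstSector = 4 // first data block currently at byte 2048
// 1536 + 512 + 512 >= 2048, so the block at sector 4 must be relocated
tableOffset + batSize + spaceNeededBytes >= firstSector * VHD_SECTOR_SIZE // → true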
@@ -507,7 +498,7 @@ export class Vhd {
     return this._write(
       blockTable.slice(i, i + VHD_ENTRY_SIZE),
-      uint32ToUint64(this.header.tableOffset) + i
+      this.header.tableOffset + i
     )
   }
@@ -563,6 +554,9 @@
     if (blockAddr === BLOCK_UNUSED) {
       blockAddr = await this.createBlock(block.id)
+      parentBitmap = Buffer.alloc(this.bitmapSize, 0)
+    } else if (parentBitmap === undefined) {
+      parentBitmap = (await this._readBlock(block.id, true)).bitmap
     }
     const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
@@ -629,11 +623,13 @@
   }
   // Write a context footer. (At the end and beginning of a vhd file.)
-  async writeFooter () {
+  async writeFooter (onlyEndFooter = false) {
     const { footer } = this
-    const offset = this.getEndOfData()
     const rawFooter = fuFooter.pack(footer)
+    const eof = await this._handler.getSize(this._path)
+    // sometimes the file is longer than anticipated, we still need to put the footer at the end
+    const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
     footer.checksum = checksumStruct(rawFooter, fuFooter)
     debug(
@@ -641,8 +637,9 @@
         footer.checksum
       }). (data=${rawFooter.toString('hex')})`
     )
-    await this._write(rawFooter, 0)
+    if (!onlyEndFooter) {
+      await this._write(rawFooter, 0)
+    }
     await this._write(rawFooter, offset)
   }
@@ -658,6 +655,73 @@
     )
     return this._write(rawHeader, offset)
   }
+  async writeData (offsetSectors, buffer) {
+    const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
+    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
+    const endBufferSectors = offsetSectors + bufferSizeSectors
+    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
+    await this.ensureBatSize(lastBlock)
+    const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
+    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
+      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
+    for (
+      let currentBlock = startBlock;
+      currentBlock <= lastBlock;
+      currentBlock++
+    ) {
+      const offsetInBlockSectors = Math.max(
+        0,
+        offsetSectors - currentBlock * this.sectorsPerBlock
+      )
+      const endInBlockSectors = Math.min(
+        endBufferSectors - currentBlock * this.sectorsPerBlock,
+        this.sectorsPerBlock
+      )
+      const startInBuffer = Math.max(
+        0,
+        (currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
+      )
+      const endInBuffer = Math.min(
+        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
+          VHD_SECTOR_SIZE,
+        buffer.length
+      )
+      let inputBuffer
+      if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
+        inputBuffer = buffer.slice(startInBuffer, endInBuffer)
+      } else {
+        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
+        buffer.copy(
+          inputBuffer,
+          offsetInBlockSectors * VHD_SECTOR_SIZE,
+          startInBuffer,
+          endInBuffer
+        )
+      }
+      await this.writeBlockSectors(
+        { id: currentBlock, data: inputBuffer },
+        offsetInBlockSectors,
+        endInBlockSectors
+      )
+    }
+    await this.writeFooter()
+  }
+  async ensureSpaceForParentLocators (neededSectors) {
+    const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
+    const currentSpace =
+      Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
+      firstLocatorOffset / VHD_SECTOR_SIZE
+    if (currentSpace < neededSectors) {
+      const deltaSectors = neededSectors - currentSpace
+      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
+      this.header.tableOffset += sectorsToBytes(deltaSectors)
+      await this._write(this.blockTable, this.header.tableOffset)
+    }
+    return firstLocatorOffset
+  }
 }
 // Merge vhd child into vhd parent.
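writeData splits a sector-aligned buffer into per-block slices: fully covered blocks are written as-is, partially covered ones are zero-padded to a full block and writeBlockSectors only applies the touched sectors. A standalone re-derivation of the index arithmetic (illustrative, assuming the default 4096 data sectors per 2 MiB block):

const SECTOR = 512
const sectorsPerBlock = 4096 // 2 MiB of data per block
function blockSpans (offsetSectors, bufferBytes) {
  const sizeSectors = Math.ceil(bufferBytes / SECTOR)
  const end = offsetSectors + sizeSectors
  const first = Math.floor(offsetSectors / sectorsPerBlock)
  const last = Math.ceil(end / sectorsPerBlock) - 1
  const spans = []
  for (let b = first; b <= last; b++) {
    spans.push({
      block: b,
      offsetInBlock: Math.max(0, offsetSectors - b * sectorsPerBlock),
      endInBlock: Math.min(end - b * sectorsPerBlock, sectorsPerBlock),
    })
  }
  return spans
}
// a 2048-sector write starting at sector 4000 straddles two blocks:
blockSpans(4000, 2048 * SECTOR)
// → [ { block: 0, offsetInBlock: 4000, endInBlock: 4096 },
//     { block: 1, offsetInBlock: 0, endInBlock: 1952 } ]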
@@ -719,9 +783,9 @@ export default concurrency(2)(async function vhdMerge (
   const cFooter = childVhd.footer
   const pFooter = parentVhd.footer
-  pFooter.currentSize = { ...cFooter.currentSize }
+  pFooter.currentSize = cFooter.currentSize
   pFooter.diskGeometry = { ...cFooter.diskGeometry }
-  pFooter.originalSize = { ...cFooter.originalSize }
+  pFooter.originalSize = cFooter.originalSize
   pFooter.timestamp = cFooter.timestamp
   pFooter.uuid = cFooter.uuid
@@ -743,30 +807,51 @@ export async function chainVhd (
   parentHandler,
   parentPath,
   childHandler,
-  childPath
+  childPath,
+  force = false
 ) {
   const parentVhd = new Vhd(parentHandler, parentPath)
   const childVhd = new Vhd(childHandler, childPath)
-  await Promise.all([
-    parentVhd.readHeaderAndFooter(),
-    childVhd.readHeaderAndFooter(),
-  ])
-  const { header } = childVhd
-  const parentName = relative(dirname(childPath), parentPath)
-  const parentUuid = parentVhd.footer.uuid
-  if (
-    header.parentUnicodeName !== parentName ||
-    !isEqual(header.parentUuid, parentUuid)
-  ) {
-    header.parentUuid = parentUuid
-    header.parentUnicodeName = parentName
-    await childVhd.writeHeader()
-    return true
-  }
-  return false
+  await childVhd.readHeaderAndFooter()
+  const { header, footer } = childVhd
+  if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
+    if (!force) {
+      throw new Error('cannot chain disk of type ' + footer.diskType)
+    }
+    footer.diskType = HARD_DISK_TYPE_DIFFERENCING
+  }
+  await Promise.all([
+    childVhd.readBlockTable(),
+    parentVhd.readHeaderAndFooter(),
+  ])
+  const parentName = relative(dirname(childPath), parentPath)
+  header.parentUuid = parentVhd.footer.uuid
+  header.parentUnicodeName = parentName
+  header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
+  const encodedFilename = Buffer.from(parentName, 'utf16le')
+  const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
+  const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
+  await childVhd._write(encodedFilename, position)
+  header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
+    dataSpaceSectors
+  )
+  header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
+  header.parentLocatorEntry[0].platformDataOffset = position
+  for (let i = 1; i < 8; i++) {
+    header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
+    header.parentLocatorEntry[i].platformDataSpace = 0
+    header.parentLocatorEntry[i].platformDataLength = 0
+    header.parentLocatorEntry[i].platformDataOffset = 0
+  }
+  await childVhd.writeHeader()
+  await childVhd.writeFooter()
+  return true
 }
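chainVhd now writes a real W2KU parent locator: the parent path, relative to the child and encoded as UTF-16LE, stored in whole sectors reserved between the headers and the BAT. A worked example with an assumed parent name:

const parentName = 'parent.vhd' // hypothetical relative path
const encoded = Buffer.from(parentName, 'utf16le')
encoded.length // → 20 (2 bytes per UTF-16 code unit)
Math.ceil(encoded.length / 512) // → 1 sector reserved for the locator
// entry 0 then gets platformCode = PLATFORM_W2KU (0x57326b75, 'W2ku'),
// platformDataLength = 20, platformDataSpace = 512 and platformDataOffset =
// the position returned by ensureSpaceForParentLocators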
@@ -797,10 +882,7 @@ export const createReadStream = asyncIteratorToStream(function * (handler, path)
   // TODO: empty parentUuid and parentLocatorEntry-s in header
   let header = {
     ...vhd.header,
-    tableOffset: {
-      high: 0,
-      low: 512 + 1024,
-    },
+    tableOffset: 512 + 1024,
     parentUnicodeName: '',
   }
@@ -815,9 +897,7 @@ export const createReadStream = asyncIteratorToStream(function * (handler, path)
     const sectorsPerBlock =
       sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE
-    const nBlocks = Math.ceil(
-      uint32ToUint64(footer.currentSize) / header.blockSize
-    )
+    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
     const blocksOwner = new Array(nBlocks)
     for (


@@ -10,7 +10,7 @@ const formatFiles = files => {
 const testFiles = files =>
   run(
     './node_modules/.bin/jest',
-    ['--findRelatedTests', '--passWithNoTests'].concat(files)
+    ['--testRegex=^(?!.*.integ.spec.js$).*.spec.js$', '--findRelatedTests', '--passWithNoTests'].concat(files)
   )
 // -----------------------------------------------------------------------------


@@ -701,10 +701,6 @@
   dependencies:
     pako "^1.0.3"
-"@nraynaud/struct-fu@^1.0.1":
-  version "1.0.1"
-  resolved "https://registry.yarnpkg.com/@nraynaud/struct-fu/-/struct-fu-1.0.1.tgz#059a0588dea50647c3677783692dafdadfcadf97"
 "@nraynaud/xo-fs@^0.0.5":
   version "0.0.5"
   resolved "https://registry.yarnpkg.com/@nraynaud/xo-fs/-/xo-fs-0.0.5.tgz#0f8c525440909223904b6841a37f4d255baa54b3"
@@ -10886,7 +10882,7 @@ strip-json-comments@~2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a"
-struct-fu@^1.0.0:
+struct-fu@^1.0.0, struct-fu@^1.2.0:
   version "1.2.0"
   resolved "https://registry.yarnpkg.com/struct-fu/-/struct-fu-1.2.0.tgz#a40b9eb60a41bb341228cff125fde4887daa85ac"