feat(vhd-lib/merge): use Vhd* classes (#5950)

This commit is contained in:
Florent BEAUCHAMP
2021-11-18 11:30:04 +01:00
committed by GitHub
parent 1f47aa491d
commit d7ee13f98d
13 changed files with 461 additions and 243 deletions

View File

@@ -76,6 +76,7 @@ export default class RemoteHandlerAbstract {
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
this.closeFile = sharedLimit(this.closeFile)
this.copy = sharedLimit(this.copy)
this.getInfo = sharedLimit(this.getInfo)
this.getSize = sharedLimit(this.getSize)
this.list = sharedLimit(this.list)
@@ -307,6 +308,17 @@ export default class RemoteHandlerAbstract {
return p
}
async copy(oldPath, newPath, { checksum = false } = {}) {
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
let p = timeout.call(this._copy(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._copy(checksumFile(oldPath), checksumFile(newPath))])
}
return p
}
async rmdir(dir) {
await timeout.call(this._rmdir(normalizePath(dir)).catch(ignoreEnoent), this._timeout)
}
@@ -519,6 +531,9 @@ export default class RemoteHandlerAbstract {
// Abstract hook: concrete handlers must override to rename a file within the remote.
async _rename(oldPath, newPath) {
  throw new Error('Not implemented')
}
// Abstract hook: concrete handlers must override to copy a file within the remote.
async _copy(oldPath, newPath) {
  throw new Error('Not implemented')
}
async _rmdir(dir) {
throw new Error('Not implemented')

View File

@@ -33,6 +33,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
return fs.close(fd)
}
async _copy(oldPath, newPath) {
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)

View File

@@ -51,6 +51,27 @@ export default class S3Handler extends RemoteHandlerAbstract {
return { Bucket: this._bucket, Key: this._dir + file }
}
// Server-side copy of an S3 object using a multipart upload: the source is
// copied in MAX_PART_SIZE chunks with uploadPartCopy, then the upload is
// completed. On any failure the multipart upload is aborted so no orphan
// (billed) upload is left behind.
// NOTE(review): parts are copied sequentially; presumably acceptable for the
// expected object sizes — confirm if parallelism is needed.
async _copy(oldPath, newPath) {
  const size = await this._getSize(oldPath)
  const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
  // CopySource must be the bucket-qualified key of the source object
  const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
  try {
    const parts = []
    let start = 0
    while (start < size) {
      // HTTP byte ranges are inclusive, hence the trailing -1
      const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
      const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
      const upload = await this._s3.uploadPartCopy(partParams)
      parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
      start += MAX_PART_SIZE
    }
    await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
  } catch (e) {
    await this._s3.abortMultipartUpload(multipartParams)
    throw e
  }
}
async _isNotEmptyDir(path) {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
@@ -147,25 +168,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
// nothing to do, directories do not exist, they are part of the files' path
}
// s3 doesn't have a rename operation, so copy + delete source
async _rename(oldPath, newPath) {
const size = await this._getSize(oldPath)
const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
const param2 = { ...multipartParams, CopySource: `/${this._bucket}/${this._dir}${oldPath}` }
try {
const parts = []
let start = 0
while (start < size) {
const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
const upload = await this._s3.uploadPartCopy(partParams)
parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
start += MAX_PART_SIZE
}
await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams)
throw e
}
await this.copy(oldPath, newPath)
await this._s3.deleteObject(this._createParams(oldPath))
}

View File

@@ -203,6 +203,7 @@ test('it can create a vhd stream', async () => {
it('can stream content', async () => {
const initalSizeMb = 5 // 2 block and an half
const initialNbBlocks = Math.ceil(initalSizeMb / 2)
const initialByteSize = initalSizeMb * 1024 * 1024
const rawFileName = `${tempDir}/randomfile`
await createRandomFile(rawFileName, initalSizeMb)

View File

@@ -100,8 +100,10 @@ export class VhdAbstract {
*
* @returns {number} the merged data size
*/
coalesceBlock(child, blockId) {
throw new Error(`coalescing the block ${blockId} from ${child} is not implemented`)
async coalesceBlock(child, blockId) {
const block = await child.readBlock(blockId)
await this.writeEntireBlock(block)
return block.data.length
}
/**

View File

@@ -0,0 +1,67 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { openVhd } from '../openVhd'
import { createRandomFile, convertFromRawToVhd, convertToVhdDirectory } from '../tests/utils'
let tempDir = null
jest.setTimeout(60000)
// create a fresh scratch directory for each test…
beforeEach(async () => {
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})
// …and remove it afterwards so tests stay isolated
afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
})
test('Can coalesce block', async () => {
  const initalSize = 4
  const parentrawFileName = `${tempDir}/randomfile`
  const parentFileName = `${tempDir}/parent.vhd`
  const parentDirectoryName = `${tempDir}/parent.dir.vhd`
  await createRandomFile(parentrawFileName, initalSize)
  await convertFromRawToVhd(parentrawFileName, parentFileName)
  await convertToVhdDirectory(parentrawFileName, parentFileName, parentDirectoryName)

  // use a distinct raw file for the child: reusing `${tempDir}/randomfile`
  // would overwrite the parent's raw source
  const childrawFileName = `${tempDir}/randomfile.child`
  const childFileName = `${tempDir}/childFile.vhd`
  await createRandomFile(childrawFileName, initalSize)
  await convertFromRawToVhd(childrawFileName, childFileName)

  const childRawDirectoryName = `${tempDir}/randomFile2.vhd`
  const childDirectoryFileName = `${tempDir}/childDirFile.vhd`
  const childDirectoryName = `${tempDir}/childDir.vhd`
  await createRandomFile(childRawDirectoryName, initalSize)
  await convertFromRawToVhd(childRawDirectoryName, childDirectoryFileName)
  await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)

  await Disposable.use(async function* () {
    const handler = getHandler({ url: 'file://' })
    const parentVhd = yield openVhd(handler, parentDirectoryName, { flags: 'w' })
    await parentVhd.readBlockAllocationTable()
    const childFileVhd = yield openVhd(handler, childFileName)
    await childFileVhd.readBlockAllocationTable()
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    // coalesce from a VhdFile child (generic read/write path)
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)

    // coalesce from a VhdDirectory child (same-handler chunk copy path)
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(0)).data
    childBlockData = (await childDirectoryVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
  })
})

View File

@@ -1,4 +1,4 @@
import { buildHeader, buildFooter } from './_utils'
import { buildHeader, buildFooter, sectorsToBytes } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { test, set as setBitmap } from '../_bitmap'
@@ -39,8 +39,8 @@ export class VhdDirectory extends VhdAbstract {
this.#uncheckedBlockTable = blockTable
}
static async open(handler, path) {
const vhd = new VhdDirectory(handler, path)
static async open(handler, path, { flags = 'r+' } = {}) {
const vhd = new VhdDirectory(handler, path, { flags })
// openning a file for reading does not trigger EISDIR as long as we don't really read from it :
// https://man7.org/linux/man-pages/man2/open.2.html
@@ -54,19 +54,20 @@ export class VhdDirectory extends VhdAbstract {
}
}
static async create(handler, path) {
static async create(handler, path, { flags = 'wx+' } = {}) {
await handler.mkdir(path)
const vhd = new VhdDirectory(handler, path)
const vhd = new VhdDirectory(handler, path, { flags })
return {
dispose: () => {},
value: vhd,
}
}
constructor(handler, path) {
constructor(handler, path, opts) {
super()
this._handler = handler
this._path = path
this._opts = opts
}
async readBlockAllocationTable() {
@@ -78,13 +79,13 @@ export class VhdDirectory extends VhdAbstract {
return test(this.#blockTable, blockId)
}
getChunkPath(partName) {
_getChunkPath(partName) {
return this._path + '/' + partName
}
async _readChunk(partName) {
// here we can implement compression and / or crypto
const buffer = await this._handler.readFile(this.getChunkPath(partName))
const buffer = await this._handler.readFile(this._getChunkPath(partName))
return {
buffer: Buffer.from(buffer),
@@ -92,10 +93,14 @@ export class VhdDirectory extends VhdAbstract {
}
async _writeChunk(partName, buffer) {
assert(Buffer.isBuffer(buffer))
assert.notStrictEqual(
this._opts?.flags,
'r',
`Can't write a chunk ${partName} in ${this._path} with read permission`
)
// here we can implement compression and / or crypto
// chunks can be in sub directories : create direcotries if necessary
// chunks can be in sub directories : create directories if necessary
const pathParts = partName.split('/')
let currentPath = this._path
@@ -104,8 +109,7 @@ export class VhdDirectory extends VhdAbstract {
currentPath += '/' + pathParts[i]
await this._handler.mkdir(currentPath)
}
return this._handler.writeFile(this.getChunkPath(partName), buffer)
return this._handler.writeFile(this._getChunkPath(partName), buffer, this._opts)
}
// put block in subdirectories to limit impact when doing directory listing
@@ -167,11 +171,18 @@ export class VhdDirectory extends VhdAbstract {
return this._writeChunk('bat', this.#blockTable)
}
// only works if data are in the same bucket
// only works if data are in the same handler
// and if the full block is modified in child ( which is the case whit xcp)
coalesceBlock(child, blockId) {
this._handler.copy(child.getChunkPath(blockId), this.getChunkPath(blockId))
async coalesceBlock(child, blockId) {
if (!(child instanceof VhdDirectory) || this._handler !== child._handler) {
return super.coalesceBlock(child, blockId)
}
await this._handler.copy(
child._getChunkPath(child._getBlockPath(blockId)),
this._getChunkPath(this._getBlockPath(blockId))
)
return sectorsToBytes(this.sectorsPerBlock)
}
async writeEntireBlock(block) {

View File

@@ -6,13 +6,20 @@ import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { Disposable, pFromCallback } from 'promise-toolbox'
import { randomBytes } from 'crypto'
import { VhdFile } from './VhdFile'
import { openVhd } from '../openVhd'
import { SECTOR_SIZE } from '../_constants'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from '../tests/utils'
import {
checkFile,
createRandomFile,
convertFromRawToVhd,
convertToVhdDirectory,
recoverRawContent,
} from '../tests/utils'
let tempDir = null
@@ -26,6 +33,29 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('respect the checkSecondFooter flag', async () => {
  const initalSize = 0
  const rawFileName = `${tempDir}/randomfile`
  await createRandomFile(rawFileName, initalSize)
  const vhdFileName = `${tempDir}/randomfile.vhd`
  await convertFromRawToVhd(rawFileName, vhdFileName)

  const handler = getHandler({ url: `file://${tempDir}` })
  const size = await handler.getSize('randomfile.vhd')
  const fd = await handler.openFile('randomfile.vhd', 'r+')
  const buffer = Buffer.alloc(512, 0)
  // add a fake (zeroed) footer at the end; the write MUST be awaited,
  // otherwise the file may be closed before the corruption is flushed
  await handler.write(fd, buffer, size)
  await handler.closeFile(fd)

  // not using openVhd to be able to call readHeaderAndFooter separately
  const vhd = new VhdFile(handler, 'randomfile.vhd')
  // default and explicit checkSecondFooter=true must detect the corruption
  await expect(vhd.readHeaderAndFooter()).rejects.toThrow()
  await expect(vhd.readHeaderAndFooter(true)).rejects.toThrow()
  // checkSecondFooter=false skips the trailing footer and must succeed
  expect(await vhd.readHeaderAndFooter(false)).toEqual(undefined)
})
test('blocks can be moved', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile`
@@ -58,6 +88,7 @@ test('the BAT MSB is not used for sign', async () => {
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
await vhd.writeFooter()
// we recover the data manually for speed reasons.
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
@@ -162,3 +193,45 @@ test('BAT can be extended and blocks moved', async () => {
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
expect(await fs.readFile(recoveredFileName)).toEqual(await fs.readFile(rawFileName))
})
test('Can coalesce block', async () => {
  const initalSize = 4
  const parentrawFileName = `${tempDir}/randomfile`
  const parentFileName = `${tempDir}/parent.vhd`
  await createRandomFile(parentrawFileName, initalSize)
  await convertFromRawToVhd(parentrawFileName, parentFileName)

  // use a distinct raw file for the child: reusing `${tempDir}/randomfile`
  // would overwrite the parent's raw source
  const childrawFileName = `${tempDir}/randomfile.child`
  const childFileName = `${tempDir}/childFile.vhd`
  await createRandomFile(childrawFileName, initalSize)
  await convertFromRawToVhd(childrawFileName, childFileName)

  const childRawDirectoryName = `${tempDir}/randomFile2.vhd`
  const childDirectoryFileName = `${tempDir}/childDirFile.vhd`
  const childDirectoryName = `${tempDir}/childDir.vhd`
  await createRandomFile(childRawDirectoryName, initalSize)
  await convertFromRawToVhd(childRawDirectoryName, childDirectoryFileName)
  await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)

  await Disposable.use(async function* () {
    const handler = getHandler({ url: 'file://' })
    const parentVhd = yield openVhd(handler, parentFileName, { flags: 'r+' })
    await parentVhd.readBlockAllocationTable()
    const childFileVhd = yield openVhd(handler, childFileName)
    await childFileVhd.readBlockAllocationTable()
    const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
    await childDirectoryVhd.readBlockAllocationTable()

    // coalesce from a VhdFile child
    await parentVhd.coalesceBlock(childFileVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    let parentBlockData = (await parentVhd.readBlock(0)).data
    let childBlockData = (await childFileVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)

    // coalesce from a VhdDirectory child
    await parentVhd.coalesceBlock(childDirectoryVhd, 0)
    await parentVhd.writeFooter()
    await parentVhd.writeBlockAllocationTable()
    parentBlockData = (await parentVhd.readBlock(0)).data
    childBlockData = (await childDirectoryVhd.readBlock(0)).data
    expect(parentBlockData.equals(childBlockData)).toEqual(true)
  })
})

View File

@@ -9,7 +9,7 @@ import {
import { computeBatSize, sectorsToBytes, buildHeader, buildFooter, BUF_BLOCK_UNUSED } from './_utils'
import { createLogger } from '@xen-orchestra/log'
import { fuFooter, fuHeader, checksumStruct } from '../_structs'
import { set as mapSetBit, test as mapTestBit } from '../_bitmap'
import { set as mapSetBit } from '../_bitmap'
import { VhdAbstract } from './VhdAbstract'
import assert from 'assert'
import getFirstAndLastBlocks from '../_getFirstAndLastBlocks'
@@ -78,23 +78,23 @@ export class VhdFile extends VhdAbstract {
return super.header
}
static async open(handler, path) {
const fd = await handler.openFile(path, 'r+')
static async open(handler, path, { flags, checkSecondFooter = true } = {}) {
const fd = await handler.openFile(path, flags ?? 'r+')
const vhd = new VhdFile(handler, fd)
// openning a file for reading does not trigger EISDIR as long as we don't really read from it :
// https://man7.org/linux/man-pages/man2/open.2.html
// EISDIR pathname refers to a directory and the access requested
// involved writing (that is, O_WRONLY or O_RDWR is set).
// reading the header ensure we have a well formed file immediatly
await vhd.readHeaderAndFooter()
await vhd.readHeaderAndFooter(checkSecondFooter)
return {
dispose: () => handler.closeFile(fd),
value: vhd,
}
}
static async create(handler, path) {
const fd = await handler.openFile(path, 'wx')
static async create(handler, path, { flags } = {}) {
const fd = await handler.openFile(path, flags ?? 'wx')
const vhd = new VhdFile(handler, fd)
return {
dispose: () => handler.closeFile(fd),
@@ -343,47 +343,6 @@ export class VhdFile extends VhdAbstract {
)
}
async coalesceBlock(child, blockId) {
const block = await child.readBlock(blockId)
const { bitmap, data } = block
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
let parentBitmap = null
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(bitmap, i)) {
continue
}
let endSector = i + 1
// Count changed sectors.
while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
++endSector
}
// Write n sectors into parent.
debug(`coalesceBlock: write sectors=${i}...${endSector}`)
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this.readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
}
// Return the merged data size
return data.length
}
// Write a context footer. (At the end and beginning of a vhd file.)
async writeFooter(onlyEndFooter = false) {
const { footer } = this

View File

@@ -1,6 +1,5 @@
/* eslint-env jest */
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
@@ -9,8 +8,7 @@ import { pFromCallback } from 'promise-toolbox'
import { VhdFile, chainVhd, mergeVhd as vhdMerge } from './index'
import { SECTOR_SIZE } from './_constants'
import { checkFile, createRandomFile, convertFromRawToVhd, recoverRawContent } from './tests/utils'
import { checkFile, createRandomFile, convertFromRawToVhd } from './tests/utils'
let tempDir = null
@@ -24,55 +22,136 @@ afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
})
test('coalesce works in normal cases', async () => {
const mbOfRandom = 5
const randomFileName = `${tempDir}/randomfile`
const random2FileName = `${tempDir}/randomfile2`
const smallRandomFileName = `${tempDir}/small_randomfile`
test('merge works in normal cases', async () => {
const mbOfFather = 8
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const child2FileName = `${tempDir}/child2.vhd`
const recoveredFileName = `${tempDir}/recovered`
await createRandomFile(randomFileName, mbOfRandom)
await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
await execa('qemu-img', ['create', '-fvpc', parentFileName, mbOfRandom + 1 + 'M'])
await checkFile(parentFileName)
await convertFromRawToVhd(randomFileName, child1FileName)
const handler = getHandler({ url: 'file://' })
await execa('vhd-util', ['snapshot', '-n', child2FileName, '-p', child1FileName])
const vhd = new VhdFile(handler, child2FileName)
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
const originalSize = await handler._getSize(randomFileName)
await checkFile(child1FileName)
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, child1FileName)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
await checkFile(child1FileName)
await chainVhd(handler, child1FileName, handler, child2FileName, true)
await checkFile(child2FileName)
const smallRandom = await fs.readFile(smallRandomFileName)
const newVhd = new VhdFile(handler, child2FileName)
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile(child2FileName)
await checkFile(child1FileName)
await checkFile(parentFileName)
// merge
await vhdMerge(handler, parentFileName, handler, child1FileName)
// check that vhd is still valid
await checkFile(parentFileName)
await chainVhd(handler, parentFileName, handler, child2FileName, true)
await checkFile(child2FileName)
await vhdMerge(handler, parentFileName, handler, child2FileName)
await checkFile(parentFileName)
await recoverRawContent(parentFileName, recoveredFileName, originalSize)
await execa('cp', [randomFileName, random2FileName])
const fd = await fs.open(random2FileName, 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
const parentVhd = new VhdFile(handler, parentFileName)
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
let offset = 0
// check that the data are the same as source
for await (const block of parentVhd.blocks()) {
const blockContent = block.data
const file = offset < mbOfChildren * 1024 * 1024 ? childRandomFileName : parentRandomFileName
const buffer = Buffer.alloc(blockContent.length)
const fd = await fs.open(file, 'r')
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += parentVhd.header.blockSize
}
})
test('it can resume a merge ', async () => {
  const mbOfFather = 8
  const mbOfChildren = 4
  const parentRandomFileName = `${tempDir}/randomfile`
  const childRandomFileName = `${tempDir}/small_randomfile`
  const handler = getHandler({ url: `file://${tempDir}` })

  await createRandomFile(parentRandomFileName, mbOfFather)
  await convertFromRawToVhd(parentRandomFileName, `${tempDir}/parent.vhd`)
  const parentVhd = new VhdFile(handler, 'parent.vhd')
  await parentVhd.readHeaderAndFooter()

  await createRandomFile(childRandomFileName, mbOfChildren)
  await convertFromRawToVhd(childRandomFileName, `${tempDir}/child1.vhd`)
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  const childVhd = new VhdFile(handler, 'child1.vhd')
  await childVhd.readHeaderAndFooter()

  await handler.writeFile(
    '.parent.vhd.merge.json',
    JSON.stringify({
      parent: {
        header: parentVhd.header.checksum,
      },
      child: {
        header: 'NOT CHILD HEADER ',
      },
    })
  )
  // expect merge to fail since child header is not ok
  await expect(vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()

  await handler.unlink('.parent.vhd.merge.json')
  await handler.writeFile(
    '.parent.vhd.merge.json',
    JSON.stringify({
      parent: {
        header: 'NOT PARENT HEADER',
      },
      child: {
        header: childVhd.header.checksum,
      },
    })
  )
  // expect merge to fail since parent header is not ok
  await expect(vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()

  // break the end footer of parent
  const size = await handler.getSize('parent.vhd')
  const fd = await handler.openFile('parent.vhd', 'r+')
  const buffer = Buffer.alloc(512, 0)
  // add a fake footer at the end — awaited so the corruption is flushed
  // before the file is closed
  await handler.write(fd, buffer, size)
  await handler.closeFile(fd)
  // check vhd should fail
  await expect(parentVhd.readHeaderAndFooter()).rejects.toThrow()

  await handler.unlink('.parent.vhd.merge.json')
  await handler.writeFile(
    '.parent.vhd.merge.json',
    JSON.stringify({
      parent: {
        header: parentVhd.header.checksum,
      },
      child: {
        header: childVhd.header.checksum,
      },
      currentBlock: 1,
    })
  )
  // really merge
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')

  // reload header, footer and block allocation table: they should succeed
  await parentVhd.readHeaderAndFooter()
  await parentVhd.readBlockAllocationTable()
  let offset = 0
  // check that the data are the same as the source
  for await (const block of parentVhd.blocks()) {
    const blockContent = block.data
    // first block is marked as already merged, should not be modified
    // second block should come from the child
    // then two blocks only in parent
    const file = block.id === 1 ? childRandomFileName : parentRandomFileName
    const expected = Buffer.alloc(blockContent.length)
    const rawFd = await fs.open(file, 'r')
    try {
      await fs.read(rawFd, expected, 0, expected.length, offset)
    } finally {
      // close each raw file descriptor to avoid leaking one per block
      await fs.close(rawFd)
    }
    expect(expected.equals(blockContent)).toEqual(true)
    offset += parentVhd.header.blockSize
  }
})

View File

@@ -5,9 +5,10 @@ import noop from './_noop'
import { createLogger } from '@xen-orchestra/log'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { VhdFile } from '.'
import { openVhd } from '.'
import { basename, dirname } from 'path'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
import { Disposable } from 'promise-toolbox'
const { warn } = createLogger('vhd-lib:merge')
@@ -23,109 +24,99 @@ export default limitConcurrency(2)(async function merge(
) {
const mergeStatePath = dirname(parentPath) + '/' + '.' + basename(parentPath) + '.merge.json'
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new VhdFile(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new VhdFile(childHandler, childFd)
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
if (error.code !== 'ENOENT') {
throw error
}
// no merge state in case of missing file
})
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(
// dont check VHD is complete if recovering a merge
mergeState === undefined
),
childVhd.readHeaderAndFooter(),
])
if (mergeState !== undefined) {
mergeState = JSON.parse(mergeState)
// ensure the correct merge will be continued
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
} else {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPE_DIFFERENCING || parentDiskType === DISK_TYPE_DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
return await Disposable.use(async function* () {
let mergeState = await parentHandler.readFile(mergeStatePath).catch(error => {
if (error.code !== 'ENOENT') {
throw error
}
// no merge state in case of missing file
})
// during merging, the end footer of the parent can be overwritten by new blocks
// we should use it as a way to check vhd health
const parentVhd = yield openVhd(parentHandler, parentPath, {
flags: 'r+',
checkSecondFooter: mergeState === undefined,
})
const childVhd = yield openVhd(childHandler, childPath)
if (mergeState !== undefined) {
mergeState = JSON.parse(mergeState)
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockAllocationTable(), childVhd.readBlockAllocationTable()])
// ensure the correct merge will be continued
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
} else {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
const { maxTableEntries } = childVhd.header
if (mergeState === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
mergeState = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
}
// finds first allocated block for the 2 following loops
while (mergeState.currentBlock < maxTableEntries && !childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
}
// counts number of allocated blocks
let nBlocks = 0
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
nBlocks += 1
}
}
onProgress({ total: nBlocks, done: 0 })
// merges blocks
for (let i = 0; i < nBlocks; ++i, ++mergeState.currentBlock) {
while (!childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
await parentHandler.writeFile(mergeStatePath, JSON.stringify(mergeState), { flags: 'w' }).catch(warn)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, mergeState.currentBlock)
onProgress({
total: nBlocks,
done: i + 1,
})
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergeState.mergedDataSize
} finally {
await childHandler.closeFile(childFd)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPE_DIFFERENCING || parentDiskType === DISK_TYPE_DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
}
} finally {
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockAllocationTable(), childVhd.readBlockAllocationTable()])
const { maxTableEntries } = childVhd.header
if (mergeState === undefined) {
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
mergeState = {
child: { header: childVhd.header.checksum },
parent: { header: parentVhd.header.checksum },
currentBlock: 0,
mergedDataSize: 0,
}
// finds first allocated block for the 2 following loops
while (mergeState.currentBlock < maxTableEntries && !childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
}
// counts number of allocated blocks
let nBlocks = 0
for (let block = mergeState.currentBlock; block < maxTableEntries; block++) {
if (childVhd.containsBlock(block)) {
nBlocks += 1
}
}
onProgress({ total: nBlocks, done: 0 })
// merges blocks
for (let i = 0; i < nBlocks; ++i, ++mergeState.currentBlock) {
while (!childVhd.containsBlock(mergeState.currentBlock)) {
++mergeState.currentBlock
}
await parentHandler.writeFile(mergeStatePath, JSON.stringify(mergeState), { flags: 'w' }).catch(warn)
mergeState.mergedDataSize += await parentVhd.coalesceBlock(childVhd, mergeState.currentBlock)
onProgress({
total: nBlocks,
done: i + 1,
})
}
// some blocks could have been created or moved in parent : write bat
await parentVhd.writeBlockAllocationTable()
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
// should be a disposable
parentHandler.unlink(mergeStatePath).catch(warn)
await parentHandler.closeFile(parentFd)
}
return mergeState.mergedDataSize
})
})

View File

@@ -1,14 +1,14 @@
import { resolveAlias } from './_resolveAlias'
import { VhdFile, VhdDirectory } from './'
/**
 * Open a VHD stored either as a single file (VhdFile) or as a directory of
 * chunks (VhdDirectory): the file format is tried first, and EISDIR triggers
 * the directory fallback.
 *
 * @param {object} handler - remote handler
 * @param {string} path - VHD path (aliases are resolved first)
 * @param {object} [opts] - forwarded to VhdFile.open / VhdDirectory.open (e.g. flags)
 */
export async function openVhd(handler, path, opts) {
  const resolved = await resolveAlias(handler, path)
  try {
    return await VhdFile.open(handler, resolved, opts)
  } catch (e) {
    if (e.code !== 'EISDIR') {
      throw e
    }
    return await VhdDirectory.open(handler, resolved, opts)
  }
}

View File

@@ -42,6 +42,7 @@ export async function convertFromVmdkToRaw(vmdkName, rawName) {
}
export async function recoverRawContent(vhdName, rawName, originalSize) {
// todo should use createContentStream
await checkFile(vhdName)
await convertFromVhdToRaw(vhdName, rawName)
if (originalSize !== undefined) {
@@ -49,12 +50,9 @@ export async function recoverRawContent(vhdName, rawName, originalSize) {
}
}
export async function createRandomVhdDirectory(path, sizeMB) {
fs.mkdir(path)
const rawFileName = `${path}/temp.raw`
await createRandomFile(rawFileName, sizeMB)
const vhdFileName = `${path}/vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
// @ todo how can I call vhd-cli copy from here
export async function convertToVhdDirectory(rawFileName, vhdFileName, path) {
fs.mkdirp(path)
const srcVhd = await fs.open(vhdFileName, 'r')
@@ -75,13 +73,26 @@ export async function createRandomVhdDirectory(path, sizeMB) {
// copy blocks
const srcRaw = await fs.open(rawFileName, 'r')
const blockDataSize = 512 * 4096
const bitmap = Buffer.alloc(4096)
// make a block bitmap full of 1, marking all sectors of the block as used
const bitmap = Buffer.alloc(512, 255)
await fs.mkdir(path + '/blocks/')
await fs.mkdir(path + '/blocks/1/')
await fs.mkdir(path + '/blocks/0/')
const stats = await fs.stat(rawFileName)
const sizeMB = stats.size / 1024 / 1024
for (let i = 0, offset = 0; i < sizeMB; i++, offset += blockDataSize) {
const blockData = Buffer.alloc(blockDataSize)
await fs.read(srcRaw, blockData, offset)
await fs.writeFile(path + '/blocks/1/' + i, Buffer.concat([bitmap, blockData]))
await fs.writeFile(path + '/blocks/0/' + i, Buffer.concat([bitmap, blockData]))
}
await fs.close(srcRaw)
}
/**
 * Create a VhdDirectory at `path` filled with `sizeMB` MB of random data,
 * by generating a raw file, converting it to a VHD file, then converting
 * that into the directory layout.
 */
export async function createRandomVhdDirectory(path, sizeMB) {
  // mkdirp must be awaited: the temp files below are created inside `path`
  await fs.mkdirp(path)
  const rawFileName = `${path}/temp.raw`
  await createRandomFile(rawFileName, sizeMB)
  const vhdFileName = `${path}/temp.vhd`
  await convertFromRawToVhd(rawFileName, vhdFileName)
  await convertToVhdDirectory(rawFileName, vhdFileName, path)
}