test: rework tests following 05161bd4df

Tests of cleanVm are still failing until we fix the error condition of cleanVm's broken-VHD removal

- don't use a handler rooted at / (requires root to run); see the sketch below
- don't create files at the root of the remote (they conflict with metadata.json and encryption.json)
- test more unhappy paths
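
A minimal sketch of the new on-remote layout used by the cleanVm tests (rootPath, the vdis layout and the cleanVm call are taken from the diff below; `adapter` and `handler` are assumed to be created as in the test's beforeEach, and the job/VDI ids are placeholders):

```js
// Sketch only: `adapter` (a RemoteAdapter) and `handler` are assumed to come from
// the test's beforeEach; the job/VDI ids are illustrative placeholders.
const rootPath = 'xo-vm-backups/VMUUID/'        // VM-scoped prefix instead of '/'
const relativePath = 'vdis/<jobId>/<vdiId>'     // how VHDs are referenced from metadata.json
const basePath = `${rootPath}/${relativePath}`  // where the VHDs actually live on the remote

test('cleanVm stays inside the VM directory', async () => {
  // pointing cleanVm at rootPath instead of '/' means no root privileges are required
  // and the remote-level metadata.json / encryption.json are never touched
  await adapter.cleanVm(rootPath, { remove: true, logInfo: () => {}, logWarn: () => {}, lock: false })
  expect(await handler.list(basePath)).toEqual([])
})
```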
Florent Beauchamp 2022-10-24 18:00:40 +02:00 committed by Julien Fontanet
parent 14e205ab69
commit 0cf6f94677
7 changed files with 176 additions and 106 deletions
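
Several of the test files below switch from getHandler({ url: 'file://' }) to a synced handler scoped to a throwaway temp directory. A minimal sketch of that recurring setup/teardown, assuming the same helpers the tests already use (tmp, rimraf, promise-toolbox, @xen-orchestra/fs):

```js
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')

let tempDir, handler, disposeHandler

beforeEach(async () => {
  // the handler is rooted in a per-test temp dir, never in '/'
  tempDir = await pFromCallback(cb => tmp.dir(cb))
  const d = await getSyncedHandler({ url: `file://${tempDir}` })
  handler = d.value
  disposeHandler = d.dispose
})

afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
  disposeHandler()
})
```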


@ -14,7 +14,8 @@ const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
jest.setTimeout(60000)
@ -25,7 +26,8 @@ beforeEach(async () => {
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
basePath = `vdis/${jobId}/${vdiId}`
relativePath = `vdis/${jobId}/${vdiId}`
basePath = `${rootPath}/${relativePath}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
@ -81,13 +83,13 @@ test('It remove broken vhd', async () => {
const logInfo = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm(rootPath, { remove: false, logInfo, logWarn: logInfo, lock: false })
expect(loggued).toEqual(`VHD check error`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
expect(await handler.list(basePath)).toEqual(['notReallyAVhd.vhd'])
// really remove it
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
expect((await handler.list(basePath)).length).toEqual(0)
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
expect(await handler.list(basePath)).toEqual([])
})
test('it remove vhd with missing or multiple ancestors', async () => {
@ -121,7 +123,7 @@ test('it remove vhd with missing or multiple ancestors', async () => {
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
@ -132,12 +134,12 @@ test('it remove vhd with missing or multiple ancestors', async () => {
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
`${relativePath}/orphan.vhd`,
`${relativePath}/child.vhd`,
// abandonned.json is not here
],
})
@ -160,39 +162,39 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
`deleted.vhd`, // in metadata but not in vhds
`orphan.vhd`,
`child.vhd`,
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
test('it merges delta of non destroyed chain', async () => {
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
size: 12000, // a size too small
vhds: [
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
`${relativePath}/grandchild.vhd`, // grand child should not be merged
`${relativePath}/child.vhd`,
// orphan is not here, he should be merged in child
],
})
@ -219,15 +221,15 @@ test('it merges delta of non destroyed chain', async () => {
const logInfo = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
const [merging] = loggued
expect(merging).toEqual(`merging VHD chain`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children after the merge
expect(metadata.size).toEqual(209920)
@ -241,11 +243,11 @@ test('it merges delta of non destroyed chain', async () => {
test('it finish unterminated merge ', async () => {
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
vhds: [`${relativePath}/orphan.vhd`, `${relativePath}/child.vhd`],
})
)
@ -271,7 +273,7 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
// only check deletion
@ -367,20 +369,20 @@ describe('tests multiple combination ', () => {
// the metadata file
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${relativePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
@ -414,7 +416,7 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
await adapter.cleanVm(rootPath, { remove: true, logWarn: () => {}, lock: false })
expect(await handler.list(basePath)).toEqual([])
})


@ -122,14 +122,14 @@ describe('encryption', () => {
})
it('sync should NOT create metadata if missing (not encrypted)', async () => {
handler = getHandler({ url: `file://${dir}` })
await handler._checkMetadata()
await handler.sync()
expect(await fs.readdir(dir)).toEqual([])
})
it('sync should create metadata if missing (encrypted)', async () => {
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` })
await handler._checkMetadata()
await handler.sync()
expect(await fs.readdir(dir)).toEqual(['encryption.json', 'metadata.json'])
@ -140,11 +140,11 @@ describe('encryption', () => {
})
it('sync should not modify existing metadata', async () => {
handler = getHandler({ url: `file://${dir}` })
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "none"}`)
await fs.writeFile(`${dir}/metadata.json`, `{"random": "NOTSORANDOM"}`)
handler = getHandler({ url: `file://${dir}` })
await handler._checkMetadata()
await handler.sync()
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
@ -154,16 +154,16 @@ describe('encryption', () => {
it('should modify metadata if empty', async () => {
handler = getHandler({ url: `file://${dir}` })
await handler._checkMetadata()
await handler.sync()
await handler.forget()
// nothing created without encryption
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` })
await handler._checkMetadata()
await handler.sync()
let encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
await handler.forget()
handler = getHandler({ url: `file://${dir}` })
await handler._checkMetadata()
await handler.sync()
encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
})
@ -175,7 +175,7 @@ describe('encryption', () => {
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` })
await handler._checkMetadata()
await handler.sync()
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
@ -191,12 +191,12 @@ describe('encryption', () => {
// different key but empty remote => ok
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` })
await expect(handler._checkMetadata()).resolves.not.toThrowError()
await expect(handler.sync()).resolves.not.toThrowError()
// remote is now non-empty: can't modify the key anymore
await fs.writeFile(`${dir}/nonempty.json`, 'content')
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd10"` })
await expect(handler._checkMetadata()).rejects.toThrowError()
await expect(handler.sync()).rejects.toThrowError()
})
it('sync should fail when changing algorithm', async () => {
@ -210,6 +210,6 @@ describe('encryption', () => {
await fs.writeFile(`${dir}/nonempty.json`, 'content')
handler = getHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` })
await expect(handler._checkMetadata()).rejects.toThrowError()
await expect(handler.sync()).rejects.toThrowError()
})
})


@ -36,9 +36,11 @@
- @vates/otp major
- @vates/predicates minor
- @vates/read-chunk patch
- @xen-orchestra/backups patch
- @xen-orchestra/fs minor
- @xen-orchestra/log minor
- @xen-orchestra/mixins patch
- vhd-cli patch
- vhd-lib patch
- xo-remote-parser patch
- xo-server minor
- xo-server-transport-nagios patch


@ -5,22 +5,28 @@
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const { getHandler, getSyncedHandler } = require('@xen-orchestra/fs')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
const { openVhd, VhdDirectory } = require('../')
const { createRandomFile, convertFromRawToVhd, convertToVhdDirectory } = require('../tests/utils')
let tempDir = null
let handler
let disposeHandler
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
const d = await getSyncedHandler({ url: `file://${tempDir}` })
handler = d.value
disposeHandler = d.dispose
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
disposeHandler()
})
test('Can coalesce block', async () => {
@ -45,12 +51,11 @@ test('Can coalesce block', async () => {
await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)
await Disposable.use(async function* () {
const handler = getHandler({ url: 'file://' })
const parentVhd = yield openVhd(handler, parentDirectoryName, { flags: 'w' })
const parentVhd = yield openVhd(handler, 'parent.dir.vhd', { flags: 'w' })
await parentVhd.readBlockAllocationTable()
const childFileVhd = yield openVhd(handler, childFileName)
const childFileVhd = yield openVhd(handler, 'childFile.vhd')
await childFileVhd.readBlockAllocationTable()
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
const childDirectoryVhd = yield openVhd(handler, 'childDir.vhd')
await childDirectoryVhd.readBlockAllocationTable()
let childBlockData = (await childDirectoryVhd.readBlock(0)).data
@ -83,7 +88,6 @@ test('compressed blocks and metadata works', async () => {
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdName)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'parent.vhd')
await vhd.readBlockAllocationTable()
const compressedVhd = yield VhdDirectory.create(handler, 'compressed.vhd', { compression: 'gzip' })


@ -7,7 +7,7 @@ const fs = require('fs-extra')
const getStream = require('get-stream')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
const { randomBytes } = require('crypto')
@ -24,15 +24,22 @@ const {
} = require('../tests/utils')
let tempDir = null
let handler
let disposeHandler
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
const d = await getSyncedHandler({ url: `file://${tempDir}` })
handler = d.value
disposeHandler = d.dispose
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
disposeHandler()
})
test('respect the checkSecondFooter flag', async () => {
@ -42,8 +49,6 @@ test('respect the checkSecondFooter flag', async () => {
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: `file://${tempDir}` })
const size = await handler.getSize('randomfile.vhd')
const fd = await handler.openFile('randomfile.vhd', 'r+')
const buffer = Buffer.alloc(512, 0)
@ -64,9 +69,8 @@ test('blocks can be moved', async () => {
await createRandomFile(rawFileName, initalSize)
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new VhdFile(handler, vhdFileName)
const originalSize = await handler.getSize('randomfile')
const newVhd = new VhdFile(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
@ -79,8 +83,7 @@ test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
const emptyFileName = `${tempDir}/empty.vhd`
await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
const handler = getHandler({ url: 'file://' })
const vhd = new VhdFile(handler, emptyFileName)
const vhd = new VhdFile(handler, 'empty.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
@ -98,7 +101,7 @@ test('the BAT MSB is not used for sign', async () => {
const recoveredFileName = `${tempDir}/recovered`
const recoveredFile = await fs.open(recoveredFileName, 'w')
try {
const vhd2 = new VhdFile(handler, emptyFileName)
const vhd2 = new VhdFile(handler, 'empty.vhd')
await vhd2.readHeaderAndFooter()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@ -126,9 +129,8 @@ test('writeData on empty file', async () => {
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new VhdFile(handler, emptyFileName)
const originalSize = await handler.getSize('randomfile')
const newVhd = new VhdFile(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
@ -145,9 +147,8 @@ test('writeData in 2 non-overlaping operations', async () => {
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new VhdFile(handler, emptyFileName)
const originalSize = await handler.getSize('randomfile')
const newVhd = new VhdFile(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
@ -165,9 +166,8 @@ test('writeData in 2 overlaping operations', async () => {
await createRandomFile(rawFileName, mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
const randomData = await fs.readFile(rawFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new VhdFile(handler, emptyFileName)
const originalSize = await handler.getSize('randomfile')
const newVhd = new VhdFile(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
@ -185,9 +185,8 @@ test('BAT can be extended and blocks moved', async () => {
const vhdFileName = `${tempDir}/randomfile.vhd`
await createRandomFile(rawFileName, initalSize)
await convertFromRawToVhd(rawFileName, vhdFileName)
const handler = getHandler({ url: 'file://' })
const originalSize = await handler.getSize(rawFileName)
const newVhd = new VhdFile(handler, vhdFileName)
const originalSize = await handler.getSize('randomfile')
const newVhd = new VhdFile(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
@ -214,12 +213,11 @@ test('Can coalesce block', async () => {
await convertToVhdDirectory(childRawDirectoryName, childDirectoryFileName, childDirectoryName)
await Disposable.use(async function* () {
const handler = getHandler({ url: 'file://' })
const parentVhd = yield openVhd(handler, parentFileName, { flags: 'r+' })
const parentVhd = yield openVhd(handler, 'parent.vhd', { flags: 'r+' })
await parentVhd.readBlockAllocationTable()
const childFileVhd = yield openVhd(handler, childFileName)
const childFileVhd = yield openVhd(handler, 'childFile.vhd')
await childFileVhd.readBlockAllocationTable()
const childDirectoryVhd = yield openVhd(handler, childDirectoryName)
const childDirectoryVhd = yield openVhd(handler, 'childDir.vhd')
await childDirectoryVhd.readBlockAllocationTable()
await parentVhd.mergeBlock(childFileVhd, 0)


@ -5,7 +5,7 @@
const fs = require('fs-extra')
const rimraf = require('rimraf')
const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { VhdFile, chainVhd } = require('./index')
@ -14,15 +14,21 @@ const { _cleanupVhds: cleanupVhds, mergeVhdChain } = require('./merge')
const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
let tempDir = null
let handler
let disposeHandler
jest.setTimeout(60000)
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
const d = await getSyncedHandler({ url: `file://${tempDir}` })
handler = d.value
disposeHandler = d.dispose
})
afterEach(async () => {
await pFromCallback(cb => rimraf(tempDir, cb))
disposeHandler()
})
test('merge works in normal cases', async () => {
@ -32,7 +38,6 @@ test('merge works in normal cases', async () => {
const childRandomFileName = `small_randomfile`
const parentFileName = `parent.vhd`
const child1FileName = `child1.vhd`
const handler = getHandler({ url: `file://${tempDir}` })
await createRandomFile(`${tempDir}/${parentRandomFileName}`, mbOfFather)
await convertFromRawToVhd(`${tempDir}/${parentRandomFileName}`, `${tempDir}/${parentFileName}`)
@ -70,7 +75,6 @@ test('it can resume a simple merge ', async () => {
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const handler = getHandler({ url: `file://${tempDir}` })
await createRandomFile(`${tempDir}/randomfile`, mbOfFather)
await convertFromRawToVhd(`${tempDir}/randomfile`, `${tempDir}/parent.vhd`)
@ -169,29 +173,28 @@ test('it can resume a multiple merge ', async () => {
const parentFileName = `${tempDir}/parent.vhd`
const childFileName = `${tempDir}/child.vhd`
const grandChildFileName = `${tempDir}/grandchild.vhd`
const handler = getHandler({ url: 'file://' })
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, childFileName)
await chainVhd(handler, parentFileName, handler, childFileName, true)
await chainVhd(handler, 'parent.vhd', handler, 'child.vhd', true)
await createRandomFile(grandChildRandomFileName, mbOfGrandChildren)
await convertFromRawToVhd(grandChildRandomFileName, grandChildFileName)
await chainVhd(handler, childFileName, handler, grandChildFileName, true)
await chainVhd(handler, 'child.vhd', handler, 'grandchild.vhd', true)
const parentVhd = new VhdFile(handler, parentFileName)
const parentVhd = new VhdFile(handler, 'parent.vhd')
await parentVhd.readHeaderAndFooter()
const childVhd = new VhdFile(handler, childFileName)
const childVhd = new VhdFile(handler, 'child.vhd')
await childVhd.readHeaderAndFooter()
const grandChildVhd = new VhdFile(handler, grandChildFileName)
const grandChildVhd = new VhdFile(handler, 'grandchild.vhd')
await grandChildVhd.readHeaderAndFooter()
await handler.writeFile(
`${tempDir}/.parent.vhd.merge.json`,
`.parent.vhd.merge.json`,
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
@ -205,12 +208,12 @@ test('it can resume a multiple merge ', async () => {
// should fail since the merge state file has only data of parent and child
await expect(
async () => await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])
async () => await mergeVhdChain(handler, ['parent.vhd', 'child.vhd', 'grandchild.vhd'])
).rejects.toThrow()
// merge
await handler.unlink(`${tempDir}/.parent.vhd.merge.json`)
await handler.unlink(`.parent.vhd.merge.json`)
await handler.writeFile(
`${tempDir}/.parent.vhd.merge.json`,
`.parent.vhd.merge.json`,
JSON.stringify({
parent: {
header: parentVhd.header.checksum,
@ -219,11 +222,11 @@ test('it can resume a multiple merge ', async () => {
header: grandChildVhd.header.checksum,
},
currentBlock: 1,
childPath: [childVhd, grandChildVhd],
childPath: ['child.vhd', 'grandchild.vhd'],
})
)
// it should succeed
await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])
await mergeVhdChain(handler, ['parent.vhd', 'child.vhd', 'grandchild.vhd'])
})
test('it merge multiple child in one pass ', async () => {
@ -236,25 +239,25 @@ test('it merge multiple child in one pass ', async () => {
const parentFileName = `${tempDir}/parent.vhd`
const childFileName = `${tempDir}/child.vhd`
const grandChildFileName = `${tempDir}/grandchild.vhd`
const handler = getHandler({ url: 'file://' })
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, childFileName)
await chainVhd(handler, parentFileName, handler, childFileName, true)
await chainVhd(handler, 'parent.vhd', handler, 'child.vhd', true)
await createRandomFile(grandChildRandomFileName, mbOfGrandChildren)
await convertFromRawToVhd(grandChildRandomFileName, grandChildFileName)
await chainVhd(handler, childFileName, handler, grandChildFileName, true)
await chainVhd(handler, 'child.vhd', handler, 'grandchild.vhd', true)
// merge
await mergeVhdChain(handler, [parentFileName, childFileName, grandChildFileName])
await mergeVhdChain(handler, ['parent.vhd', 'child.vhd', 'grandchild.vhd'])
// check that vhd is still valid
await checkFile(grandChildFileName)
const parentVhd = new VhdFile(handler, grandChildFileName)
const parentVhd = new VhdFile(handler, 'grandchild.vhd')
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
@ -277,8 +280,6 @@ test('it merge multiple child in one pass ', async () => {
})
test('it cleans vhd mergedfiles', async () => {
const handler = getHandler({ url: `file://${tempDir}` })
await handler.writeFile('parent', 'parentData')
await handler.writeFile('child1', 'child1Data')
await handler.writeFile('child2', 'child2Data')


@ -4,6 +4,7 @@
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('node:fs/promises')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { Disposable, pFromCallback } = require('promise-toolbox')
@ -31,13 +32,13 @@ test('It opens a vhd file ( alias or not)', async () => {
const vhdFileName = `${tempDir}/randomfile.vhd`
await convertFromRawToVhd(rawFileName, vhdFileName)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' })
const vhd = yield openVhd(handler, vhdFileName)
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'randomfile.vhd')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
const aliasFileName = `${tempDir}/out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, vhdFileName)
const aliasFileName = `out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, 'randomfile.vhd')
const alias = yield openVhd(handler, aliasFileName)
expect(alias.header.cookie).toEqual('cxsparse')
expect(alias.footer.cookie).toEqual('conectix')
@ -50,15 +51,77 @@ test('It opens a vhd directory', async () => {
await createRandomVhdDirectory(vhdDirectory, initalSize)
await Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: 'file://' })
const vhd = yield openVhd(handler, vhdDirectory)
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'randomfile.dir')
expect(vhd.header.cookie).toEqual('cxsparse')
expect(vhd.footer.cookie).toEqual('conectix')
const aliasFileName = `${tempDir}/out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, vhdDirectory)
const aliasFileName = `out.alias.vhd`
await VhdAbstract.createAlias(handler, aliasFileName, 'randomfile.dir')
const alias = yield openVhd(handler, aliasFileName)
expect(alias.header.cookie).toEqual('cxsparse')
expect(alias.footer.cookie).toEqual('conectix')
})
})
test('It fails correctly when opening a broken vhd', async () => {
const initalSize = 4
// emtpy file
await expect(
Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
yield openVhd(handler, 'randomfile.vhd')
})
).rejects.toThrow()
const rawFileName = `${tempDir}/randomfile.vhd`
await createRandomFile(rawFileName, initalSize)
// broken file
await expect(
Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
yield openVhd(handler, 'randomfile.vhd')
})
).rejects.toThrow()
// empty dir
await fs.mkdir(`${tempDir}/dir.vhd`)
await expect(
Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'dir.vhd')
await vhd.readBlockAllocationTable()
})
).rejects.toThrow()
// dir with missing parts
await createRandomVhdDirectory(`${tempDir}/dir.vhd`, initalSize)
const targets = ['header', 'footer', 'bat']
for (const target of targets) {
await fs.rename(`${tempDir}/dir.vhd/${target}`, `${tempDir}/dir.vhd/moved`)
await expect(
Disposable.use(async function* () {
const handler = yield getSyncedHandler({ url: `file://${tempDir}` })
const vhd = yield openVhd(handler, 'dir.vhd')
await vhd.readBlockAllocationTable()
})
).rejects.toThrow()
await fs.rename(`${tempDir}/dir.vhd/moved`, `${tempDir}/dir.vhd/${target}`)
}
})
test('It fails correctly when opening a vhdfile on an encrypted remote', async () => {
const initalSize = 4
const rawFileName = `${tempDir}/randomfile.vhd`
await expect(
Disposable.use(async function* () {
const handler = yield getSyncedHandler({
url: `file://${tempDir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"`,
})
await createRandomFile(rawFileName, initalSize)
yield openVhd(handler, 'randomfile.vhd')
})
).rejects.toThrow()
})