refactor(backups/IncrementalRemoteWriter): reuse parent path from checkBaseVdis
parent c7b5b715a3
commit 822cdc3fb8
@@ -78,6 +78,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
       'writer.transfer()'
     )
 
+    // we want to control the uuids of the vhds in the chain
+    // and ensure they are correctly chained
+    await this._callWriters(
+      writer =>
+        writer.updateUuidAndChain({
+          isVhdDifferencing,
+          timestamp,
+          vdis: deltaExport.vdis,
+        }),
+      'writer.updateUuidAndChain()'
+    )
+
     this._baseVm = exportedVm
 
     if (baseVm !== undefined) {
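The new step slots in directly after the transfer fan-out. Roughly, the resulting per-writer sequence looks like this (a simplified sketch, not the project's `_callWriters` implementation, which also isolates per-writer failures; `writers` is a hypothetical name for the runner's configured writer instances):

    // simplified sketch of the new ordering, not actual project code
    for (const writer of writers) {
      await writer.transfer({ timestamp, deltaExport, sizeContainers })
      await writer.updateUuidAndChain({ isVhdDifferencing, timestamp, vdis: deltaExport.vdis })
    }
    this._baseVm = exportedVm // only then is the exported VM promoted to base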
@@ -6,11 +6,10 @@ import { chainVhd, openVhd } from 'vhd-lib'
 import { createLogger } from '@xen-orchestra/log'
 import { decorateClass } from '@vates/decorate-with'
 import { defer } from 'golike-defer'
-import { dirname } from 'node:path'
+import { dirname, basename } from 'node:path'
 
 import { formatFilenameDate } from '../../_filenameDate.mjs'
 import { getOldEntries } from '../../_getOldEntries.mjs'
-import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
 import { Task } from '../../Task.mjs'
 
 import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
@@ -125,6 +124,44 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
     }
   }
 
+  async updateUuidAndChain({ isVhdDifferencing, vdis }) {
+    assert.notStrictEqual(
+      this.#vhds,
+      undefined,
+      '_transfer must be called before updateUuidAndChain for incremental backups'
+    )
+
+    const parentVdiPaths = this.#parentVdiPaths
+    const { handler } = this._adapter
+    const vhds = this.#vhds
+    await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
+      const isDifferencing = isVhdDifferencing[`${id}.vhd`]
+      const path = `${this._vmBackupDir}/${vhds[id]}`
+      if (isDifferencing) {
+        assert.notStrictEqual(
+          parentVdiPaths,
+          undefined,
+          'checkBaseVdis must be called before updateUuidAndChain for incremental backups'
+        )
+        const parentPath = parentVdiPaths[dirname(path)]
+        // we are in an incremental backup
+        // we already computed the chain in checkBaseVdis
+        assert.notStrictEqual(parentPath, undefined, 'a differencing VHD must have a parent')
+        // forbid any kind of loop
+        assert.ok(basename(parentPath) < basename(path), 'VHDs must be sorted to be chained')
+        await chainVhd(handler, parentPath, handler, path)
+      }
+
+      // set the correct UUID in the VHD if needed
+      await Disposable.use(openVhd(handler, path), async vhd => {
+        if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
+          vhd.footer.uuid = packUuid(vdi.uuid)
+          await vhd.readBlockAllocationTable() // required by writeFooter()
+          await vhd.writeFooter()
+        }
+      })
+    })
+  }
+
   async _deleteOldEntries() {
     const adapter = this._adapter
     const oldEntries = this._oldEntries
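The loop-prevention assert relies on backup file names sorting chronologically: assuming `formatFilenameDate` yields UTC stamps such as `20240101T120000Z` (an assumption about its output shape), lexicographic comparison of the basenames is also a chronological comparison:

    // illustration only, with assumed file name shapes
    const parentName = '20240101T120000Z.vhd'
    const childName = '20240102T090000Z.vhd'
    console.log(parentName < childName) // true: the parent must predate the child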
@@ -143,16 +180,10 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
     const jobId = job.id
     const handler = adapter.handler
 
-    let metadataContent = await this._isAlreadyTransferred(timestamp)
-    if (metadataContent !== undefined) {
-      // skip the backup, taking care not to stall the forked stream
-      Task.info('This backup has already been transferred')
-      Object.values(deltaExport.streams).forEach(stream => stream.destroy())
-      return { size: 0 }
-    }
-
     const basename = formatFilenameDate(timestamp)
-    const vhds = mapValues(
+    // update this.#vhds before possibly skipping the transfer, so that
+    // updateUuidAndChain has all the data it needs
+    const vhds = (this.#vhds = mapValues(
       deltaExport.vdis,
       vdi =>
         `vdis/${jobId}/${
@@ -162,7 +193,15 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
             vdi.uuid
           : vdi.$snapshot_of$uuid
       }/${adapter.getVhdFileName(basename)}`
-    )
+    ))
+
+    let metadataContent = await this._isAlreadyTransferred(timestamp)
+    if (metadataContent !== undefined) {
+      // skip the backup, taking care not to stall the forked stream
+      Task.info('This backup has already been transferred')
+      Object.values(deltaExport.streams).forEach(stream => stream.destroy())
+      return { size: 0 }
+    }
 
     metadataContent = {
       isVhdDifferencing,
@@ -178,38 +217,13 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
       vm,
       vmSnapshot,
     }
 
     const { size } = await Task.run({ name: 'transfer' }, async () => {
       let transferSize = 0
       await asyncEach(
-        Object.entries(deltaExport.vdis),
-        async ([id, vdi]) => {
+        Object.keys(deltaExport.vdis),
+        async id => {
           const path = `${this._vmBackupDir}/${vhds[id]}`
-
-          const isDifferencing = isVhdDifferencing[`${id}.vhd`]
-          let parentPath
-          if (isDifferencing) {
-            const vdiDir = dirname(path)
-            parentPath = (
-              await handler.list(vdiDir, {
-                filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
-                prependDir: true,
-              })
-            )
-              .sort()
-              .pop()
-
-            assert.notStrictEqual(
-              parentPath,
-              undefined,
-              `missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
-            )
-
-            parentPath = parentPath.slice(1) // remove the leading slash
-
-            // TODO: remove when this has been done before the export
-            await checkVhd(handler, parentPath)
-          }
 
           // don't write it as transferSize += await asyncFn(),
           // since i += await asyncFn() leads to a race condition,
           // as explained in https://eslint.org/docs/latest/rules/require-atomic-updates
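The race warned about in these comments is easy to reproduce; a minimal standalone illustration (not project code):

    let total = 0
    async function addDiskSize(fetchSize) {
      // BAD: `total += await fetchSize()` reads `total` before the await and
      // writes it back after, so two concurrent calls can both read the same
      // old value and lose an update
      // GOOD: await first, then apply the synchronous update
      const size = await fetchSize()
      total += size
    }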
@@ -221,17 +235,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
             writeBlockConcurrency: this._config.writeBlockConcurrency,
           })
           transferSize += transferSizeOneDisk
-
-          if (isDifferencing) {
-            await chainVhd(handler, parentPath, handler, path)
-          }
-
-          // set the correct UUID in the VHD
-          await Disposable.use(openVhd(handler, path), async vhd => {
-            vhd.footer.uuid = packUuid(vdi.uuid)
-            await vhd.readBlockAllocationTable() // required by writeFooter()
-            await vhd.writeFooter()
-          })
         },
         {
           concurrency: settings.diskPerVmConcurrency,
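Net effect of the last two hunks: `_transfer` no longer discovers the parent by listing the VDI directory, and it no longer chains and relabels the VHD after the copy. Both now happen once, in `updateUuidAndChain`, which reuses the parent paths that `checkBaseVdis` already computed, as the commit title says.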
@@ -38,7 +38,9 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
       }
     }
   }
 
+  updateUuidAndChain() {
+    // nothing to do: the chaining is not modified in this case
+  }
   prepare({ isFull }) {
     // create the task related to this export and ensure all methods are called in this context
     const task = new Task({
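The no-op is deliberate: the XAPI writer targets an SR, where the VDI chain is maintained by XAPI itself, so there is nothing for the writer to re-chain and no VHD footer to rewrite. Only the file-based remote writer needs the full implementation above.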
@@ -5,6 +5,10 @@ export class AbstractIncrementalWriter extends AbstractWriter {
     throw new Error('Not implemented')
   }
 
+  updateUuidAndChain() {
+    throw new Error('Not implemented')
+  }
+
   cleanup() {
     throw new Error('Not implemented')
   }
@@ -230,6 +230,7 @@ Settings are described in [`@xen-orchestra/backups/\_runners/VmsXapi.mjs`](http
 - `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
 - `prepare({ isFull })`
 - `transfer({ timestamp, deltaExport, sizeContainers })`
+- `updateUuidAndChain({ isVhdDifferencing, vdis })`
 - `cleanup()`
 - `healthCheck()` // not executed if there is no health-check SR or if the tag doesn't match
 - **Full**
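For writer authors, the delta API above now includes `updateUuidAndChain`; a minimal skeleton (illustrative only, `MyRemoteWriter` and the comment bodies are hypothetical):

    // hypothetical skeleton of an incremental writer honoring the extended API
    class MyRemoteWriter extends AbstractIncrementalWriter {
      async checkBaseVdis(baseUuidToSrcVdi, baseVm) {} // resolve usable parent VHDs
      async prepare({ isFull }) {} // create folders, start the transfer task
      async transfer({ timestamp, deltaExport, sizeContainers }) {} // write the VHDs
      async updateUuidAndChain({ isVhdDifferencing, vdis }) {} // chain + fix UUIDs
      async cleanup() {} // flush metadata
      async healthCheck() {} // optional restore test
    }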