Compare commits

...

7 Commits

Author  SHA1  Message  Date
Florent BEAUCHAMP  21e22626b6  wip  2023-10-30 14:39:23 +01:00
Florent BEAUCHAMP  06cddc516d  refactor: pass baseVdi to importIncrementalVm  2023-10-24 17:38:46 +02:00
Florent BEAUCHAMP  d7f8d411b5  disc-oriented approach  2023-10-24 14:20:13 +02:00
Florent BEAUCHAMP  16570c6c66  reset  2023-10-24 13:18:49 +02:00
Florent BEAUCHAMP  b0789a7e31  implement same sr and vdi delta check  2023-10-24 13:17:10 +02:00
Florent BEAUCHAMP  5e71b63333  feat(@xen-orchestra/backups):ImportVmBackup  2023-10-24 13:17:10 +02:00
Florent BEAUCHAMP  3d616fde43  wip  2023-10-24 13:17:10 +02:00
4 changed files with 128 additions and 36 deletions

View File

@@ -14,7 +14,26 @@ export class ImportVmBackup {
this._xapi = xapi
}
async #detectBaseVdis() {
const vmUuid = this._metadata.vm.uuid
const vm = await this._xapi.getRecordByUuid('VM', vmUuid)
const disks = vm.$getDisks()
// map the UUID of every VDI snapshot of this VM to the UUID of the disk it belongs to
const snapshots = {}
for (const disk of Object.values(disks)) {
for (const snapshotRef of disk.snapshots) {
const snapshot = await this._xapi.getRecordByUuid('VDI', snapshotRef)
snapshots[snapshot.uuid] = disk.uuid
}
}
return snapshots
}
async run() {
const adapter = this._adapter
const metadata = this._metadata
const isFull = metadata.mode === 'full'
@@ -22,18 +41,23 @@ export class ImportVmBackup {
const sizeContainer = { size: 0 }
let backup
let baseVdis
if (isFull) {
backup = await adapter.readFullVmBackup(metadata)
watchStreamSize(backup, sizeContainer)
} else {
assert.strictEqual(metadata.mode, 'delta')
const ignoredVdis = new Set(
Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
.map(([vdiUuid]) => vdiUuid)
)
backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
baseVdis = await this.#detectBaseVdis()
backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis, { snapshotedVdis: baseVdis })
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
}
@@ -49,7 +73,7 @@ export class ImportVmBackup {
? await xapi.VM_import(backup, srRef)
: await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
...this._importIncrementalVmSettings,
detectBase: false,
baseVdis
})
await Promise.all([

View File

@@ -2,7 +2,7 @@ import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic, Constants } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
@@ -694,8 +694,8 @@ export class RemoteAdapter {
return container.size
}
// open the hierarchy of ancestors until we find a full one
async _createVhdStream(handler, path, { useChain }) {
// open the hierarchy of ancestors until we find a usable one
async _createVhdStream(handler, path, { useChain, snapshotedVdis }) {
const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
// I don't want the vhds to be disposed on return
// but only when the stream is done ( or failed )
@@ -713,7 +713,67 @@ export class RemoteAdapter {
}
const synthetic = disposableSynthetic.value
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
let stream
// try to create a stream that reuses the data already present on the host storage:
// look for an existing snapshot matching one of the VHDs of the chain
// and transfer only the differences
if (snapshotedVdis) {
try {
const vhdPaths = await handler.list(dirname(path), { filter: p => p.endsWith('.vhd') })
stream = await Disposable.use(async function* () {
// index the VHDs of this backup directory by the UUID of their parent
const vhdChildren = {}
const vhds = yield Disposable.all(vhdPaths.map(vhdPath => openVhd(handler, vhdPath)))
for (const vhd of vhds) {
vhdChildren[vhd.header.parentUuid] = vhdChildren[vhd.header.parentUuid] ?? []
vhdChildren[vhd.header.parentUuid].push(vhd)
}
const chain = []
let current = synthetic
// @todo special case: the VDI we want to restore still has its snapshot => nothing to transfer
while (current !== undefined) {
// find the children of the current VHD
const children = vhdChildren[current.footer.uuid]
// none, or more than one => the chain can't be followed unambiguously, stop here
if (children === undefined || children.length !== 1) {
break
}
const child = children[0]
// not a differencing disk => there is no list of changed blocks, no need to look further
if (child.footer.diskType !== Constants.DISK_TYPES.DIFFERENCING) {
break
}
if (snapshotedVdis[current.footer.uuid] !== undefined) {
// this VHD matches a snapshot still present on the host storage:
// only the blocks written after that snapshot need to be transferred
const descendants = VhdSynthetic.open(handler) // @todo wip: the descendants chain still has to be provided here
const negativeVhd = new NegativeVhd(synthetic, descendants)
return negativeVhd.stream()
} else {
// keep looking deeper in the chain, hoping to find a match
current = child
chain.unshift(current)
}
}
})
} catch (error) {
warn('error while trying to reuse a snapshot, falling back to the legacy restore', { error })
}
}
// fallback: stream the full synthetic VHD
if (stream === undefined) {
stream = await synthetic.stream()
}
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
@@ -721,7 +781,7 @@ export class RemoteAdapter {
return stream
}
async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true, snapshotedVdis } = {}) {
const handler = this._handler
const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
const dir = dirname(metadata._filename)
@@ -729,7 +789,7 @@ export class RemoteAdapter {
const streams = {}
await asyncMapSettled(Object.keys(vdis), async ref => {
streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain, snapshotedVdis })
})
return {

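NegativeVhd is referenced above but not defined anywhere in this diff; judging from the surrounding comments, the intent is a VHD view that exposes only the blocks written after the matched snapshot. A hedged sketch of that idea follows: the class is a placeholder, not a vhd-lib API, and containsBlock/readBlock/header only mirror the VhdAbstract shape used elsewhere in vhd-lib.

// placeholder sketch, not part of vhd-lib: stream only the blocks that changed
// after the matched snapshot, i.e. the blocks allocated in the descendant VHDs
class NegativeVhdSketch {
  constructor(full, descendants) {
    this.full = full // synthetic VHD of the complete chain (full data)
    this.descendants = descendants // synthetic VHD of the VHDs created after the snapshot
  }

  // a block has to be transferred only if it was touched after the snapshot
  containsBlock(blockId) {
    return this.descendants.containsBlock(blockId)
  }

  // iterate over the blocks worth transferring, reading their data from the full chain
  async *blocks() {
    const nBlocks = this.full.header.maxTableEntries
    for (let blockId = 0; blockId < nBlocks; blockId++) {
      if (this.containsBlock(blockId)) {
        yield await this.full.readBlock(blockId)
      }
    }
  }
}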
View File

@@ -143,11 +143,38 @@ export async function exportIncrementalVm(
)
}
// @todo move this to incremental replication
async function detectBaseVdis(vmRecord, sr) {
let baseVm
const xapi = sr.$xapi
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(
xapi.objects.all,
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vdi.other_config[TAG_COPY_SRC]] = vbd.$VDI
}
})
return baseVdis
}
export const importIncrementalVm = defer(async function importIncrementalVm(
$defer,
incrementalVm,
sr,
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
{ baseVdis = {}, cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
) {
const { version } = incrementalVm
if (compareVersions(version, '1.0.0') < 0) {
@@ -157,35 +184,15 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vmRecord = incrementalVm.vm
const xapi = sr.$xapi
let baseVm
if (detectBase) {
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(
xapi.objects.all,
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}
const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
if (detectBase) {
baseVdis = await detectBaseVdis(vmRecord, sr)
}
const vdiRecords = incrementalVm.vdis
// 0. Create suspend_VDI
@@ -249,10 +256,11 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
await asyncMap(Object.keys(vdiRecords), async vdiRef => {
const vdi = vdiRecords[vdiRef]
let newVdi
// @todo how should this condition be rewritten when a baseVdi is provided directly?
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
const baseVdi = baseVdis[vdi.other_config[TAG_COPY_SRC]]
// @todo this should only be an error when detectBase is enabled
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}

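The base detection above hinges on two other_config tags; the keyed map built by detectBaseVdis replaces the old per-VDI linear find(). A short sketch of that matching, with the lookup key hedged since the @todo above shows it is still unsettled:

// sketch of the matching performed by detectBaseVdis (tag semantics taken from the existing code):
// - a VDI exported as a delta carries other_config[TAG_BASE_DELTA] = uuid of the snapshot it was exported against
// - a VDI previously replicated to the target carries other_config[TAG_COPY_SRC] = uuid of the source VDI it was copied from
const baseVdis = {}
for (const vbd of baseVm.$VBDs) {
  const vdi = vbd.$VDI
  if (vdi !== undefined) {
    baseVdis[vdi.other_config[TAG_COPY_SRC]] = vdi
  }
}
// each incoming VDI record can then find its base with a single keyed access
const baseVdi = baseVdis[incomingVdi.other_config[TAG_BASE_DELTA]] // assumed lookup key, see the @todo above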
View File

@@ -37,7 +37,7 @@ function mapProperties(object, mapping) {
}
async function showDetails(handler, path) {
const vhd = new VhdFile(handler, resolve(path))
const { value: vhd } = await openVhd(handler, resolve(path))
try {
await vhd.readHeaderAndFooter()
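
openVhd returns a disposable whose value is the opened VHD (file- or directory-based), which is why the destructuring above grabs .value; elsewhere in this code base the disposable is usually wrapped so the underlying handles get released. A hedged sketch of that pattern, assuming the promise-toolbox Disposable helper used in the repository (the returned fields are illustrative):

import Disposable from 'promise-toolbox/Disposable'
import { openVhd } from 'vhd-lib'

// sketch: open a VHD regardless of its format and read its metadata,
// letting the disposable close the underlying resources when the callback returns
async function readVhdMetadata(handler, path) {
  return Disposable.use(openVhd(handler, path), async vhd => {
    await vhd.readBlockAllocationTable()
    return { header: vhd.header, footer: vhd.footer }
  })
}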