diff --git a/@xen-orchestra/backups/_cleanVm.integ.spec.js b/@xen-orchestra/backups/_cleanVm.integ.spec.js index 3904b436a..c821e892c 100644 --- a/@xen-orchestra/backups/_cleanVm.integ.spec.js +++ b/@xen-orchestra/backups/_cleanVm.integ.spec.js @@ -178,7 +178,7 @@ test('it merges delta of non destroyed chain', async () => { `metadata.json`, JSON.stringify({ mode: 'delta', - size: 209920, + size: 12000, // a size too small vhds: [ `${basePath}/grandchild.vhd`, // grand child should not be merged `${basePath}/child.vhd`, @@ -204,20 +204,25 @@ test('it merges delta of non destroyed chain', async () => { }, }) - let loggued = '' + let loggued = [] const onLog = message => { - loggued += message + '\n' + loggued.push(message) } await adapter.cleanVm('/', { remove: true, onLog }) - expect(loggued).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused\n`) - loggued = '' + expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`) + expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`) + + loggued = [] await adapter.cleanVm('/', { remove: true, merge: true, onLog }) - const [unused, merging] = loggued.split('\n') + const [unused, merging] = loggued expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`) expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`) - // merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12) + const metadata = JSON.parse(await handler.readFile(`metadata.json`)) + // size should be the size of children + grand children after the merge + expect(metadata.size).toEqual(209920) + // merging is already tested in vhd-lib, don't retest it here (and these vhd are as empty as my stomach at 12h12) // only check deletion const remainingVhds = await handler.list(basePath) 
expect(remainingVhds.length).toEqual(2) @@ -230,7 +235,7 @@ test('it finish unterminated merge ', async () => { `metadata.json`, JSON.stringify({ mode: 'delta', - size: 209920, + size: undefined, vhds: [ `${basePath}/orphan.vhd`, // grand child should not be merged `${basePath}/child.vhd`, @@ -364,6 +369,10 @@ describe('tests mulitple combination ', () => { ) await adapter.cleanVm('/', { remove: true, merge: true }) + const metadata = JSON.parse(await handler.readFile(`metadata.json`)) + // size should be the size of children + grand children + clean after the merge + expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined) + // broken vhd, non referenced, abandonned should be deleted ( alias and data) // ancestor and child should be merged // grand child and clean vhd should not have changed diff --git a/@xen-orchestra/backups/_cleanVm.js b/@xen-orchestra/backups/_cleanVm.js index 196f3bb9e..591aeb7f4 100644 --- a/@xen-orchestra/backups/_cleanVm.js +++ b/@xen-orchestra/backups/_cleanVm.js @@ -10,6 +10,24 @@ const { limitConcurrency } = require('limit-concurrency-decorator') const { Task } = require('./Task.js') const { Disposable } = require('promise-toolbox') +// checking the size of a vhd directory is costly +// 1 Http Query per 1000 blocks +// we only check size of all the vhd are VhdFiles +function shouldComputeVhdsSize(vhds) { + return vhds.every(vhd => vhd instanceof VhdFile) +} + +const computeVhdsSize = (handler, vhdPaths) => + Disposable.use( + vhdPaths.map(vhdPath => openVhd(handler, vhdPath)), + async vhds => { + if (shouldComputeVhdsSize(vhds)) { + const sizes = await asyncMap(vhds, vhd => vhd.getSize()) + return sum(sizes) + } + } + ) + // chain is an array of VHDs from child to parent // // the whole chain will be merged into parent, parent will be renamed to child @@ -130,6 +148,7 @@ exports.cleanVm = async function cleanVm( const handler = this._handler const vhds = new Set() + const vhdsToJSons = new Set() const vhdParents = { 
__proto__: null } const vhdChildren = { __proto__: null } @@ -242,16 +261,10 @@ exports.cleanVm = async function cleanVm( } const { mode } = metadata - let size if (mode === 'full') { const linkedXva = resolve('/', vmDir, metadata.xva) - if (xvas.has(linkedXva)) { unusedXvas.delete(linkedXva) - - size = await handler.getSize(linkedXva).catch(error => { - onLog(`failed to get size of ${json}`, { error }) - }) } else { onLog(`the XVA linked to the metadata ${json} is missing`) if (remove) { @@ -272,22 +285,9 @@ exports.cleanVm = async function cleanVm( // possible (existing disks) even if one disk is missing if (missingVhds.length === 0) { linkedVhds.forEach(_ => unusedVhds.delete(_)) - - // checking the size of a vhd directory is costly - // 1 Http Query per 1000 blocks - // we only check size of all the vhd are VhdFiles - - const shouldComputeSize = linkedVhds.every(vhd => vhd instanceof VhdFile) - if (shouldComputeSize) { - try { - await Disposable.use(Disposable.all(linkedVhds.map(vhdPath => openVhd(handler, vhdPath))), async vhds => { - const sizes = await asyncMap(vhds, vhd => vhd.getSize()) - size = sum(sizes) - }) - } catch (error) { - onLog(`failed to get size of ${json}`, { error }) - } - } + linkedVhds.forEach(path => { + vhdsToJSons[path] = json + }) } else { onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds }) if (remove) { @@ -297,22 +297,6 @@ exports.cleanVm = async function cleanVm( } } } - - const metadataSize = metadata.size - if (size !== undefined && metadataSize !== size) { - onLog(`incorrect size in metadata: ${metadataSize ?? 
'none'} instead of ${size}`) - - // don't update if the the stored size is greater than found files, - // it can indicates a problem - if (fixMetadata && (metadataSize === undefined || metadataSize < size)) { - try { - metadata.size = size - await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' }) - } catch (error) { - onLog(`failed to update size in backup metadata ${json}`, { error }) - } - } - } }) // TODO: parallelize by vm/job/vdi @@ -371,9 +355,15 @@ exports.cleanVm = async function cleanVm( }) } - const doMerge = () => { - const promise = asyncMap(toMerge, async chain => limitedMergeVhdChain(chain, { handler, onLog, remove, merge })) - return merge ? promise.then(sizes => ({ size: sum(sizes) })) : promise + const metadataWithMergedVhd = {} + const doMerge = async () => { + await asyncMap(toMerge, async chain => { + const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge }) + if (merged !== undefined) { + const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metadata file + metadataWithMergedVhd[metadataPath] = true + } + }) } await Promise.all([ @@ -398,6 +388,46 @@ exports.cleanVm = async function cleanVm( }), ]) + // update size for delta metadata with merged VHD + // check for the others that the size is the same as the real file size + + await asyncMap(jsons, async metadataPath => { + const metadata = JSON.parse(await handler.readFile(metadataPath)) + + let fileSystemSize + const merged = metadataWithMergedVhd[metadataPath] !== undefined + + const { mode, size, vhds, xva } = metadata + + try { + if (mode === 'full') { + // a full backup : check size + const linkedXva = resolve('/', vmDir, xva) + fileSystemSize = await handler.getSize(linkedXva) + } else if (mode === 'delta') { + fileSystemSize = await computeVhdsSize(handler, vhds) + + // don't warn if the size has changed after a merge + if (!merged && fileSystemSize !== size) { + onLog(`incorrect size in metadata: ${size ?? 
'none'} instead of ${fileSystemSize}`) + } + } + } catch (error) { + onLog(`failed to get size of ${metadataPath}`, { error }) + return + } + + // systematically update size after a merge + if ((merged || fixMetadata) && size !== fileSystemSize) { + metadata.size = fileSystemSize + try { + await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' }) + } catch (error) { + onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error }) + } + } + }) + return { // boolean whether some VHDs were merged (or should be merged) merge: toMerge.length !== 0, diff --git a/CHANGELOG.unreleased.md b/CHANGELOG.unreleased.md index e8100890e..7156e5e6d 100644 --- a/CHANGELOG.unreleased.md +++ b/CHANGELOG.unreleased.md @@ -11,6 +11,8 @@ > Users must be able to say: “I had this issue, happy to know it's fixed” +- [Backup] Remove incorrect size warning following a merge [Forum #5727](https://xcp-ng.org/forum/topic/4769/warnings-showing-in-system-logs-following-each-backup-job/4) (PR [#6010](https://github.com/vatesfr/xen-orchestra/pull/6010)) + ### Packages to release > Packages will be released in the order they are here, therefore, they should @@ -28,5 +30,7 @@ > > In case of conflict, the highest (lowest in previous list) `$version` wins. -- @xen-orchestra/proxy patch - vhd-lib minor +- @xen-orchestra/backups patch +- xo-server patch +- @xen-orchestra/proxy patch