Compare commits

..

1 Commits

Author SHA1 Message Date
Florent Beauchamp
17a697e750 fix(backups): better detection of looped vhd chain 2024-02-09 16:36:28 +00:00
9 changed files with 14 additions and 76 deletions

View File

@@ -432,6 +432,13 @@ export async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
if (chain.includes(vhd)) {
logWarn('loop vhd chain', { path: vhd })
// keep the current chain
// note that a VHD can't have two children, that means that
// a looped one is always the last of a chain
return chain
}
chain.unshift(vhd)
return chain
}

View File

@@ -26,16 +26,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
_mustDoSnapshot() {
const vm = this._vm
const settings = this._settings
return (
settings.unconditionalSnapshot ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0 ||
settings.fullInterval !== 1 ||
settings.deltaComputationMode === 'AGAINST_PARENT_VHD'
)
return true
}
async _copy() {

View File

@@ -3,7 +3,7 @@ import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract, VhdSynthetic } from 'vhd-lib'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
@@ -183,7 +183,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
let parentVhd
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
@@ -205,11 +204,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
if(settings.deltaComputationMode === 'AGAINST_PARENT_VHD'){
const {dispose, value } = await VhdSynthetic.fromVhdChain(handler, parentPath)
parentVhd = value
$defer(()=>dispose())
}
}
// don't write it as transferSize += await async function
@@ -219,7 +213,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
parentVhd,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._config.writeBlockConcurrency,
})

View File

@@ -29,6 +29,7 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- xo-server patch
<!--packages-end-->

View File

@@ -84,9 +84,6 @@ exports.VhdAbstract = class VhdAbstract {
readBlockAllocationTable() {
throw new Error(`reading block allocation table is not implemented`)
}
readBlockHashes() {
throw new Error(`reading block hashes table is not implemented`)
}
/**
* @typedef {Object} BitmapBlock
@@ -107,10 +104,6 @@ exports.VhdAbstract = class VhdAbstract {
throw new Error(`reading ${onlyBitmap ? 'bitmap of block' : 'block'} ${blockId} is not implemented`)
}
getBlockHash(blockId){
throw new Error(`reading block hash ${blockId} is not implemented`)
}
/**
* coalesce the block with id blockId from the child vhd into
* this vhd

View File

@@ -4,7 +4,6 @@ const { unpackHeader, unpackFooter, sectorsToBytes } = require('./_utils')
const { createLogger } = require('@xen-orchestra/log')
const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
const { test, set: setBitmap } = require('../_bitmap')
const { hashBlock } = require('../hashBlock')
const { VhdAbstract } = require('./VhdAbstract')
const assert = require('assert')
const { synchronized } = require('decorator-synchronized')
@@ -76,7 +75,6 @@ function getCompressor(compressorType) {
exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
#uncheckedBlockTable
#blockHashes
#header
footer
#compressor
@@ -142,17 +140,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
this.#blockTable = buffer
}
async readBlockHashes() {
try {
const { buffer } = await this._readChunk('hashes')
this.#blockHashes = JSON.parse(buffer)
} catch (err) {
if (err.code !== 'ENOENT') {
throw err
}
}
}
containsBlock(blockId) {
return test(this.#blockTable, blockId)
}
@@ -190,11 +177,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
const blockSuffix = blockId - blockPrefix * 1e3
return `blocks/${blockPrefix}/${blockSuffix}`
}
getBlockHash(blockId) {
if (this.#blockHashes !== undefined) {
return this.#blockHashes[blockId]
}
}
_getFullBlockPath(blockId) {
return this.#getChunkPath(this.#getBlockPath(blockId))
@@ -227,10 +209,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
}
const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
const hash = this.getBlockHash(blockId)
if (hash) {
assert.strictEqual(hash, hash(buffer))
}
return {
id: blockId,
bitmap: buffer.slice(0, this.bitmapSize),
@@ -266,7 +244,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
assert.notStrictEqual(this.#blockTable, undefined, 'Block allocation table has not been read')
assert.notStrictEqual(this.#blockTable.length, 0, 'Block allocation table is empty')
return Promise.all([this._writeChunk('bat', this.#blockTable), this._writeChunk('hashes', this.#blockHashes)])
return this._writeChunk('bat', this.#blockTable)
}
// only works if data are in the same handler
@@ -287,11 +265,8 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
if (!blockExists) {
setBitmap(this.#blockTable, blockId)
this.#blockHashes[blockId] = child.getBlockHash(blockId)
await this.writeBlockAllocationTable()
}
// @todo block hash changes may be lost if the vhd merge fails
// should migrate to writing the bat from time to time, in sync with the metadata
} catch (error) {
if (error.code === 'ENOENT' && isResumingMerge === true) {
// when resuming, the blocks moved since the last merge state write are
@@ -312,7 +287,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
async writeEntireBlock(block) {
await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
setBitmap(this.#blockTable, block.id)
this.#blockHashes[block.id] = hashBlock(block.buffer)
}
async _readParentLocatorData(id) {

View File

@@ -96,10 +96,6 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
assert(false, `no such block ${blockId}`)
}
async getBlockHash(blockId){
return this.#getVhdWithBlock(blockId).getBlockHash(blockId)
}
async readBlock(blockId, onlyBitmap = false) {
// only read the content of the first vhd containing this block
return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)

View File

@@ -1,7 +1,6 @@
'use strict'
const { createLogger } = require('@xen-orchestra/log')
const { hashBlock } = require('./hashBlock.js')
const { parseVhdStream } = require('./parseVhdStream.js')
const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
const { Disposable } = require('promise-toolbox')
@@ -9,7 +8,7 @@ const { asyncEach } = require('@vates/async-each')
const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, parentVhd }) {
const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
const vhd = yield VhdDirectory.create(handler, path, { compression })
await asyncEach(
parseVhdStream(inputStream),
@@ -25,10 +24,6 @@ const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, {
await vhd.writeParentLocator({ ...item, data: item.buffer })
break
case 'block':
if (parentVhd !== undefined && hashBlock(item.buffer) === parentVhd.getBlockHash(item.id)) {
// already in parent
return
}
await vhd.writeEntireBlock(item)
break
case 'bat':
@@ -50,10 +45,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
handler,
path,
inputStream,
{ validator, concurrency = 16, compression, parentVhd } = {}
{ validator, concurrency = 16, compression } = {}
) {
try {
const size = await buildVhd(handler, path, inputStream, { concurrency, compression, parentVhd })
const size = await buildVhd(handler, path, inputStream, { concurrency, compression })
if (validator !== undefined) {
await validator.call(this, path)
}

View File

@@ -1,12 +0,0 @@
'use strict'
const { createHash } = require('node:crypto')
// using xxhash as for xva would make smaller hash and the collision risk would be low for the dedup,
// since we have a tuple(index, hash), but it would be notable if
// we implement dedup on top of this later
// at most, a 2TB full vhd will use 32MB for its hashes
// and this file is compressed with vhd block
exports.hashBlock = function (buffer) {
return createHash('sha256').update(buffer).digest('hex')
}