feat(backups): write and merge block concurrency are now configurable (#6416)

This commit is contained in:
Florent BEAUCHAMP
2022-09-16 14:54:33 +02:00
committed by GitHub
parent ad02700b51
commit 9da65b6c7c
7 changed files with 38 additions and 7 deletions

View File

@@ -636,13 +636,13 @@ class RemoteAdapter {
return path return path
} }
async writeVhd(path, input, { checksum = true, validator = noop } = {}) { async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
const handler = this._handler const handler = this._handler
if (this.#useVhdDirectory()) { if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd` const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, { await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16, concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(), compression: this.#getCompressionType(),
async validator() { async validator() {
await input.task await input.task

View File

@@ -37,7 +37,7 @@ const computeVhdsSize = (handler, vhdPaths) =>
) )
// chain is [ ancestor, child_1, ..., child_n ] // chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) { async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) { if (merge) {
logInfo(`merging VHD chain`, { chain }) logInfo(`merging VHD chain`, { chain })
@@ -55,6 +55,7 @@ async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
try { try {
return await mergeVhdChain(handler, chain, { return await mergeVhdChain(handler, chain, {
logInfo, logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) { onProgress({ done: d, total: t }) {
done = d done = d
total = t total = t
@@ -181,7 +182,15 @@ const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm( exports.cleanVm = async function cleanVm(
vmDir, vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn } {
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
) { ) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain) const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
@@ -447,7 +456,13 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {} const metadataWithMergedVhd = {}
const doMerge = async () => { const doMerge = async () => {
await asyncMap(toMerge, async chain => { await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge }) const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
if (merged !== undefined) { if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true metadataWithMergedVhd[metadataPath] = true

View File

@@ -204,6 +204,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
// merges and chainings // merges and chainings
checksum: false, checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath), validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
}) })
if (isDelta) { if (isDelta) {

View File

@@ -39,6 +39,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
Task.warning(message, data) Task.warning(message, data)
}, },
lock: false, lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
}) })
}) })
} catch (error) { } catch (error) {

View File

@@ -8,6 +8,7 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it” > Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup] Improve listing speed by updating caches instead of regenerating them on backup creation/deletion (PR [#6411](https://github.com/vatesfr/xen-orchestra/pull/6411)) - [Backup] Improve listing speed by updating caches instead of regenerating them on backup creation/deletion (PR [#6411](https://github.com/vatesfr/xen-orchestra/pull/6411))
- [Backup] Add `mergeBlockConcurrency` and `writeBlockConcurrency` to allow tuning of backup resources consumptions (PR [#6416](https://github.com/vatesfr/xen-orchestra/pull/6416))
### Bug fixes ### Bug fixes
@@ -34,6 +35,7 @@
<!--packages-start--> <!--packages-start-->
- @xen-orchestra/backups minor - @xen-orchestra/backups minor
- vhd-lib minor
- xo-server-auth-saml patch - xo-server-auth-saml patch
- xo-web patch - xo-web patch

View File

@@ -78,7 +78,7 @@ module.exports._cleanupVhds = cleanupVhds
module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain( module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
handler, handler,
chain, chain,
{ onProgress = noop, logInfo = noop, removeUnused = false } = {} { onProgress = noop, logInfo = noop, removeUnused = false, mergeBlockConcurrency = 2 } = {}
) { ) {
assert(chain.length >= 2) assert(chain.length >= 2)
@@ -123,7 +123,8 @@ module.exports.mergeVhdChain = limitConcurrency(2)(async function mergeVhdChain(
childIsVhdDirectory = childVhd instanceof VhdDirectory childIsVhdDirectory = childVhd instanceof VhdDirectory
} }
const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? 2 : 1 // merging a VhdFile must not run concurrently with the potential block reordering after a change
const concurrency = parentIsVhdDirectory && childIsVhdDirectory ? mergeBlockConcurrency : 1
if (mergeState === undefined) { if (mergeState === undefined) {
// merge should be along a vhd chain // merge should be along a vhd chain
assert.strictEqual(UUID.stringify(childVhd.header.parentUuid), UUID.stringify(parentVhd.footer.uuid)) assert.strictEqual(UUID.stringify(childVhd.header.parentUuid), UUID.stringify(parentVhd.footer.uuid))

View File

@@ -87,8 +87,19 @@ snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
# Delay for which backups listing on a remote is cached # Delay for which backups listing on a remote is cached
listingDebounce = '1 min' listingDebounce = '1 min'
# settings when using Vhd directories ( s3 , encryption )
# you should use 'none' if your fs is already compressed
# changing this setting will generate new full backups
vhdDirectoryCompression = 'brotli' vhdDirectoryCompression = 'brotli'
# how many blocks can be merged in parallel per running backup
# increase to improve performance, reduce if you have timeouts during merge
mergeBlockConcurrency = 2
# how many blocks can be uploaded in parallel
# increase to improve performance, reduce if you have timeouts or memory errors during transfer
writeBlockConcurrency = 16
# This is a work-around. # This is a work-around.
# #
# See https://github.com/vatesfr/xen-orchestra/pull/4674 # See https://github.com/vatesfr/xen-orchestra/pull/4674