fix(backups): mirror must not replicate themselves (#7043)

Florent BEAUCHAMP authored 2023-09-21 14:45:29 +02:00, committed by GitHub
parent db92f0e365
commit e5c5f19219
3 changed files with 25 additions and 11 deletions
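Context for the diffs below: a mirror backup reads an existing Backup Repository (`job.sourceRemote`) and replicates its VM backups to the remotes selected by `job.remotes`. Before this fix, the source repository could itself end up among the write targets, which is presumably what surfaced as the `xo:fs:local WARN lock compromised` warning mentioned in the changelog entry. A minimal sketch of the job shape involved, with hypothetical remote ids and the usual `{ id: { __or: [...] } }` pattern assumed:

```js
// Hypothetical mirror backup job: the source Backup Repository is read,
// the remotes matched by the simple pattern are written to.
const job = {
  sourceRemote: 'br-source',
  remotes: { id: { __or: ['br-target-1', 'br-target-2'] } },
}

// What went wrong before this commit: a writer was also created for
// 'br-source', i.e. the mirror replicated its own source repository into
// itself while reading from it.
console.log('read from:', job.sourceRemote)
console.log('write to :', job.remotes.id.__or)
```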

@@ -4,6 +4,7 @@ import { Disposable } from 'promise-toolbox'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
import { Abstract } from './_Abstract.mjs'
import { extractIdsFromSimplePattern } from '../../extractIdsFromSimplePattern.mjs'
export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
constructor({
@@ -34,7 +35,8 @@ export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract
this._writers = writers
const RemoteWriter = this._getRemoteWriter()
Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
extractIdsFromSimplePattern(job.remotes).forEach(remoteId => {
const adapter = remoteAdapters[remoteId]
const targetSettings = {
...settings,
...allSettings[remoteId],
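
In this first file, the writer loop now derives its targets from the job's `remotes` pattern instead of iterating every entry of `remoteAdapters`, which for a mirror job also contains the adapter of the source remote. A runnable sketch of that idea follows; the stand-in `extractIdsFromSimplePattern` only assumes the pattern shape shown above and is not the actual xen-orchestra helper, and the adapters are plain placeholder strings:

```js
// Stand-in for the real helper, assuming backup jobs use either { id: 'x' }
// or { id: { __or: ['x', 'y'] } } as their "simple pattern".
const extractIdsFromSimplePattern = ({ id } = {}) =>
  typeof id === 'string' ? [id] : Array.isArray(id?.__or) ? id.__or : []

const job = {
  sourceRemote: 'br-source',
  remotes: { id: { __or: ['br-target-1', 'br-target-2'] } },
}

// The runner is given an adapter per reachable remote, source included.
const remoteAdapters = {
  'br-source': 'adapter of the source BR (read only)',
  'br-target-1': 'adapter of target 1',
  'br-target-2': 'adapter of target 2',
}

// Old behaviour: Object.entries(remoteAdapters) → one writer per adapter,
// including one pointed back at the source.
// New behaviour: only the ids of the target pattern get a writer.
extractIdsFromSimplePattern(job.remotes).forEach(remoteId => {
  const adapter = remoteAdapters[remoteId]
  console.log('would create a RemoteWriter for', remoteId, '→', adapter)
})
```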

@@ -18,6 +18,7 @@
- [Jobs] Fix schedules not being displayed on first load [#6968](https://github.com/vatesfr/xen-orchestra/issues/6968) (PR [#7034](https://github.com/vatesfr/xen-orchestra/pull/7034))
- [OVA Export] Fix support of disks with more than 8.2GiB of content (PR [#7047](https://github.com/vatesfr/xen-orchestra/pull/7047))
- [Backup] Fix `VHDFile implementation is not compatible with encrypted remote` when using VHD directory with encryption (PR [#7045](https://github.com/vatesfr/xen-orchestra/pull/7045))
- [Backup/Mirror] Fix `xo:fs:local WARN lock compromised` when mirroring a Backup Repository to a local/NFS/SMB repository (PR [#7043](https://github.com/vatesfr/xen-orchestra/pull/7043))
### Packages to release
@@ -35,6 +36,7 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- vhd-lib minor
- xo-server patch
- xo-server-auth-github patch

@@ -148,10 +148,7 @@ export default class BackupNg {
const proxyId = job.proxy
const useXoProxy = proxyId !== undefined
const remoteIds = unboxIdsFromPattern(job.remotes)
if (job.sourceRemote !== undefined) {
remoteIds.push(job.sourceRemote)
}
const targetRemoteIds = unboxIdsFromPattern(job.remotes)
try {
if (!useXoProxy && backupsConfig.disableWorkers) {
const localTaskIds = { __proto__: null }
@@ -207,7 +204,11 @@
const xapis = {}
const remoteErrors = {}
await waitAll([
asyncMapSettled(remoteIds, async id => {
asyncMapSettled([...targetRemoteIds, job.sourceRemote], async id => {
if (id === undefined) {
// job.sourceRemote is only defined in mirror backups
return
}
let remote
try {
remote = await app.getRemoteWithCredentials(id)
@@ -249,20 +250,29 @@
}),
])
// Fails the job if all remotes are disabled
// update remotes list with only the enabled remotes
// only keep the destination remote in case of a mirror backup
const enabledTargetRemotes = Object.keys(remotes).filter(remoteId => remoteId !== job.sourceRemote)
// Fails the job if all the target remotes are disabled
//
// TODO: integrate each failure in its own tasks and still proceed
// with other tasks like rolling snapshot and replication.
if (remoteIds.length > 0 && Object.keys(remotes).length === 0) {
if (targetRemoteIds.length > 0 && enabledTargetRemotes.length === 0) {
const error = new Error(`couldn't instantiate any remote`)
error.errors = remoteErrors
throw error
}
// update remotes list with only the enabled remotes
if (job.sourceRemote !== undefined && remotes[job.sourceRemote] === undefined) {
const error = new Error(`couldn't instantiate source remote`)
error.errors = remoteErrors
throw error
}
job.remotes = {
id: {
__or: Object.keys(remotes),
__or: enabledTargetRemotes,
},
}
@@ -332,7 +342,7 @@
)
}
} finally {
remoteIds.forEach(id => this._listVmBackupsOnRemote(REMOVE_CACHE_ENTRY, id))
targetRemoteIds.forEach(id => this._listVmBackupsOnRemote(REMOVE_CACHE_ENTRY, id))
}
}
app.registerJobExecutor('backup', executor)
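
To summarize the xo-server side of the change (a sketch reusing the names from the hunks above, not a drop-in excerpt): the source remote is instantiated together with the targets because its credentials are needed, but it is filtered back out of the list handed to the runner, and the job fails early either when no target remote is usable or, for mirror backups, when the source remote itself could not be instantiated.

```js
// Sketch of the remote checks after this change; `remotes` is the map of
// successfully instantiated remotes and `remoteErrors` collects the failures
// per remote id, both as in the diff above.
function checkRemotes({ job, targetRemoteIds, remotes, remoteErrors }) {
  // only the destination remotes count as targets in a mirror backup
  const enabledTargetRemotes = Object.keys(remotes).filter(remoteId => remoteId !== job.sourceRemote)

  // targets were requested but none is usable → fail the job
  if (targetRemoteIds.length > 0 && enabledTargetRemotes.length === 0) {
    const error = new Error("couldn't instantiate any remote")
    error.errors = remoteErrors
    throw error
  }

  // a mirror backup additionally needs its source remote to be usable
  if (job.sourceRemote !== undefined && remotes[job.sourceRemote] === undefined) {
    const error = new Error("couldn't instantiate source remote")
    error.errors = remoteErrors
    throw error
  }

  // only the enabled targets become the runner's write destinations
  return { id: { __or: enabledTargetRemotes } }
}

// Example: a mirror job whose source remote failed to instantiate.
try {
  checkRemotes({
    job: { sourceRemote: 'br-source', remotes: { id: { __or: ['br-target-1'] } } },
    targetRemoteIds: ['br-target-1'],
    remotes: { 'br-target-1': {} },
    remoteErrors: { 'br-source': new Error('remote is unreachable') },
  })
} catch (error) {
  console.log(error.message) // → couldn't instantiate source remote
}
```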