Compare commits
2 Commits
fix_vmware ... vm-backup-

| Author | SHA1 | Date |
|---|---|---|
|  | d12ece455d |  |
|  | bdc6e43ff2 |  |
@@ -191,14 +191,13 @@ export class RemoteAdapter {
// check if we will be allowed to merge a a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(VhdSynthetic.fromVhdChain(this.handler, path), vhd => {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}

// check if all the chain is composed of vhd directory
const isVhdDirectory = vhd.checkVhdsClass(VhdDirectory)
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()

@@ -6,11 +6,12 @@ import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'

const noop = Function.prototype

const DEFAULT_REMOTE_VM_SETTINGS = {
concurrency: 2,
copyRetention: 0,
@@ -20,6 +21,7 @@ const DEFAULT_REMOTE_VM_SETTINGS = {
healthCheckVmsWithTags: [],
maxExportRate: 0,
maxMergedDeltasPerRun: Infinity,
nRetriesVmBackupFailures: 0,
timeout: 0,
validateVhdStreams: false,
vmTimeout: 0,
@@ -41,6 +43,7 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
const throttleStream = createStreamThrottle(settings.maxExportRate)

const config = this._config

await Disposable.use(
() => this._getAdapter(job.sourceRemote),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
@@ -62,8 +65,19 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
const allSettings = this._job.settings
const baseSettings = this._baseSettings

const queue = new Set(vmsUuids)
const taskByVmId = {}
const nTriesByVmId = {}

const handleVm = vmUuid => {
if (nTriesByVmId[vmUuid] === undefined) {
nTriesByVmId[vmUuid] = 0
}
nTriesByVmId[vmUuid]++

const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }
const vmSettings = { ...settings, ...allSettings[vmUuid] }
const isLastRun = nTriesByVmId[vmUuid] === vmSettings.nRetriesVmBackupFailures + 1

const opts = {
baseSettings,
@@ -72,7 +86,7 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
healthCheckSr,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vmUuid] },
settings: vmSettings,
sourceRemoteAdapter,
throttleStream,
vmUuid,
@@ -86,10 +100,39 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
}

return runTask(taskStart, () => vmBackup.run())
if (taskByVmId[vmUuid] === undefined) {
taskByVmId[vmUuid] = new Task(taskStart)
}
const task = taskByVmId[vmUuid]
return task
.run(async () => {
try {
const result = await vmBackup.run()
task.success(result)
return result
} catch (error) {
if (isLastRun) {
throw error
} else {
Task.warning(`Retry the VM mirror backup due to an error`, {
attempt: nTriesByVmId[vmUuid],
error: error.message,
})
queue.add(vmUuid)
}
}
})
.catch(noop)
}
const { concurrency } = settings
await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))
const _handleVm = !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm)

while (queue.size > 0) {
const vmIds = Array.from(queue)
queue.clear()

await asyncMapSettled(vmIds, _handleVm)
}
}
)
}

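The hunks above replace the single `asyncMapSettled` pass with a retry queue. A minimal sketch of that flow, reusing names from the diff and assuming a hypothetical `runVmBackup` helper in place of `vmBackup.run()` (the real code also reuses one `Task` per VM so repeated attempts share a log entry):

```js
// Failed VMs are re-queued until they succeed or exhaust
// settings.nRetriesVmBackupFailures; each pass drains the queue in batches.
const queue = new Set(vmsUuids)
const nTriesByVmId = {}

while (queue.size > 0) {
  const batch = Array.from(queue)
  queue.clear()
  await asyncMapSettled(batch, async vmUuid => {
    nTriesByVmId[vmUuid] = (nTriesByVmId[vmUuid] ?? 0) + 1
    try {
      await runVmBackup(vmUuid) // hypothetical stand-in for vmBackup.run()
    } catch (error) {
      const isLastRun = nTriesByVmId[vmUuid] === settings.nRetriesVmBackupFailures + 1
      if (isLastRun) throw error // the surrounding Task records the final failure
      queue.add(vmUuid) // picked up by the next pass of the while loop
    }
  })
}
```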
@@ -11,6 +11,8 @@ import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'

const noop = Function.prototype

const DEFAULT_XAPI_VM_SETTINGS = {
bypassVdiChainsCheck: false,
checkpointSnapshot: false,
@@ -24,6 +26,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
healthCheckVmsWithTags: [],
maxExportRate: 0,
maxMergedDeltasPerRun: Infinity,
nRetriesVmBackupFailures: 0,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
@@ -53,6 +56,7 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
const throttleStream = createStreamThrottle(settings.maxExportRate)

const config = this._config

await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -89,48 +93,98 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
const allSettings = this._job.settings
const baseSettings = this._baseSettings

const queue = new Set(vmIds)
const taskByVmId = {}
const nTriesByVmId = {}

const handleVm = vmUuid => {
const getVmTask = () => {
if (taskByVmId[vmUuid] === undefined) {
taskByVmId[vmUuid] = new Task(taskStart)
}
return taskByVmId[vmUuid]
}
const vmBackupFailed = error => {
if (isLastRun) {
throw error
} else {
Task.warning(`Retry the VM backup due to an error`, {
attempt: nTriesByVmId[vmUuid],
error: error.message,
})
queue.add(vmUuid)
}
}

if (nTriesByVmId[vmUuid] === undefined) {
nTriesByVmId[vmUuid] = 0
}
nTriesByVmId[vmUuid]++

const vmSettings = { ...settings, ...allSettings[vmUuid] }
const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }
const isLastRun = nTriesByVmId[vmUuid] === vmSettings.nRetriesVmBackupFailures + 1

return this._getRecord('VM', vmUuid).then(
disposableVm =>
Disposable.use(disposableVm, vm => {
taskStart.data.name_label = vm.name_label
return runTask(taskStart, () => {
const opts = {
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
schedule,
settings: { ...settings, ...allSettings[vm.uuid] },
srs,
throttleStream,
vm,
}
let vmBackup
if (job.mode === 'delta') {
vmBackup = new IncrementalXapi(opts)
} else {
if (job.mode === 'full') {
vmBackup = new FullXapi(opts)
} else {
throw new Error(`Job mode ${job.mode} not implemented`)
Disposable.use(disposableVm, async vm => {
if (taskStart.data.name_label === undefined) {
taskStart.data.name_label = vm.name_label
}

const task = getVmTask()
return task
.run(async () => {
const opts = {
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
schedule,
settings: vmSettings,
srs,
throttleStream,
vm,
}
}
return vmBackup.run()
})

let vmBackup
if (job.mode === 'delta') {
vmBackup = new IncrementalXapi(opts)
} else {
if (job.mode === 'full') {
vmBackup = new FullXapi(opts)
} else {
throw new Error(`Job mode ${job.mode} not implemented`)
}
}

try {
const result = await vmBackup.run()
task.success(result)
return result
} catch (error) {
vmBackupFailed(error)
}
})
.catch(noop) // errors are handled by logs
}),
error =>
runTask(taskStart, () => {
throw error
getVmTask().run(() => {
vmBackupFailed(error)
})
)
}
const { concurrency } = settings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
const _handleVm = concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm)

while (queue.size > 0) {
const vmIds = Array.from(queue)
queue.clear()

await asyncMapSettled(vmIds, _handleVm)
}
}
)
}

@@ -2,7 +2,6 @@ import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import * as UUID from 'uuid'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'

@@ -10,48 +9,11 @@ import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
import { Disposable } from 'promise-toolbox'
import { openVhd } from 'vhd-lib'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

class IncrementalRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {
return IncrementalRemoteWriter
}
async _selectBaseVm(metadata) {
// for each disk , get the parent
const baseUuidToSrcVdi = new Map()

// no previous backup for a base( =key) backup
if (metadata.isBase) {
return
}
await asyncEach(Object.entries(metadata.vdis), async ([id, vdi]) => {
const isDifferencing = metadata.isVhdDifferencing[`${id}.vhd`]
if (isDifferencing) {
const vmDir = getVmBackupDir(metadata.vm.uuid)
const path = `${vmDir}/${metadata.vhds[id]}`
// don't catch error : we can't recover if the source vhd are missing
await Disposable.use(openVhd(this._sourceRemoteAdapter._handler, path), vhd => {
baseUuidToSrcVdi.set(UUID.stringify(vhd.header.parentUuid), vdi.$snapshot_of$uuid)
})
}
})

const presentBaseVdis = new Map(baseUuidToSrcVdi)
await this._callWriters(
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis),
'writer.checkBaseVdis()',
false
)
// check if the parent vdi are present in all the remotes
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
throw new Error(`Missing vdi ${baseUuid} which is a base for a delta`)
}
})
// yeah , let's go
}
async _run($defer) {
const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
await this._callWriters(async writer => {
@@ -64,7 +26,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
if (transferList.length > 0) {
for (const metadata of transferList) {
assert.strictEqual(metadata.mode, 'delta')
await this._selectBaseVm(metadata)

await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
useChain: false,
@@ -88,17 +50,6 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
}),
'writer.transfer()'
)
// this will update parent name with the needed alias
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp: metadata.timestamp,
vdis: incrementalExport.vdis,
}),
'writer.updateUuidAndChain()'
)

await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
// for healthcheck
this._tags = metadata.vm.tags

@@ -78,18 +78,6 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
'writer.transfer()'
)

// we want to control the uuid of the vhd in the chain
// and ensure they are correctly chained
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp,
vdis: deltaExport.vdis,
}),
'writer.updateUuidAndChain()'
)

this._baseVm = exportedVm

if (baseVm !== undefined) {
@@ -145,7 +133,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
])
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi.uuid)
baseUuidToSrcVdi.set(baseUuid, srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
@@ -166,18 +154,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}

const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdiUuid)
fullVdisRequired.add(srcVdi.uuid)
}
})

@@ -1,15 +1,17 @@
import assert from 'node:assert'
import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, openVhd } from 'vhd-lib'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname, basename } from 'node:path'
import { dirname } from 'node:path'

import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'

import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
@@ -21,45 +23,42 @@ import { Disposable } from 'promise-toolbox'
const { warn } = createLogger('xo:backups:DeltaBackupWriter')

export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
#parentVdiPaths
#vhds
async checkBaseVdis(baseUuidToSrcVdi) {
this.#parentVdiPaths = {}
const { handler } = this._adapter
const adapter = this._adapter

const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`

await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdiUuid]) => {
let parentDestPath
const vhdDir = `${vdisDir}/${srcVdiUuid}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
try {
const vhds = await handler.list(vhdDir, {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
// the last one is probably the right one

for (let i = vhds.length - 1; i >= 0 && parentDestPath === undefined; i--) {
const path = vhds[i]
await asyncMap(vhds, async path => {
try {
if (await adapter.isMergeableParent(packedBaseUuid, path)) {
parentDestPath = path
}
await checkVhdChain(handler, path)
// Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the checks of a path are done in parallel, found would be containing
// only the last answer of isMergeableParent which is probably not the right one
// this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300

const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
}
}
})
} catch (error) {
warn('checkBaseVdis', { error })
}
// no usable parent => the runner will have to decide to fall back to a full or stop backup
if (parentDestPath === undefined) {
if (!found) {
baseUuidToSrcVdi.delete(baseUuid)
} else {
this.#parentVdiPaths[vhdDir] = parentDestPath
}
})
}
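The warning comment in the hunk above describes a read-modify-write race on `found` when the per-path checks run in parallel. A self-contained toy reproduction of that pitfall (purely illustrative, not code from the repository), showing why `checkBaseVdis` awaits first and combines synchronously afterwards:

```js
async function demo() {
  let found = false
  const results = [false, true, false] // pretend only the second path is mergeable

  await Promise.all(
    results.map(async result => {
      // BAD: `found` is read before the await and written after it, so the last
      // iteration to resolve overwrites the others with its stale read:
      //   found = found || (await Promise.resolve(result))

      // GOOD (the pattern used above): await first, then combine synchronously.
      const ok = await Promise.resolve(result)
      found = found || ok
    })
  )
  return found // reliably true with the GOOD form
}
```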
@@ -124,44 +123,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}

async updateUuidAndChain({ isVhdDifferencing, vdis }) {
assert.notStrictEqual(
this.#vhds,
undefined,
'_transfer must be called before updateUuidAndChain for incremental backups'
)

const parentVdiPaths = this.#parentVdiPaths
const { handler } = this._adapter
const vhds = this.#vhds
await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
const path = `${this._vmBackupDir}/${vhds[id]}`
if (isDifferencing) {
assert.notStrictEqual(
parentVdiPaths,
'checkbasevdi must be called before updateUuidAndChain for incremental backups'
)
const parentPath = parentVdiPaths[dirname(path)]
// we are in a incremental backup
// we already computed the chain in checkBaseVdis
assert.notStrictEqual(parentPath, undefined, 'A differential VHD must have a parent')
// forbid any kind of loop
assert.ok(basename(parentPath) < basename(path), `vhd must be sorted to be chained`)
await chainVhd(handler, parentPath, handler, path)
}

// set the correct UUID in the VHD if needed
await Disposable.use(openVhd(handler, path), async vhd => {
if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
}
})
})
}

async _deleteOldEntries() {
const adapter = this._adapter
const oldEntries = this._oldEntries
@@ -180,10 +141,16 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const jobId = job.id
const handler = adapter.handler

let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip backup while being vigilant to not stuck the forked stream
Task.info('This backup has already been transfered')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}

const basename = formatFilenameDate(timestamp)
// update this.#vhds before eventually skipping transfer, so that
// updateUuidAndChain has all the mandatory data
const vhds = (this.#vhds = mapValues(
const vhds = mapValues(
deltaExport.vdis,
vdi =>
`vdis/${jobId}/${
@@ -193,15 +160,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vdi.uuid
: vdi.$snapshot_of$uuid
}/${adapter.getVhdFileName(basename)}`
))

let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip backup while being vigilant to not stuck the forked stream
Task.info('This backup has already been transfered')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}
)

metadataContent = {
isVhdDifferencing,
@@ -217,13 +176,38 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vm,
vmSnapshot,
}

const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await asyncEach(
Object.keys(deltaExport.vdis),
async id => {
Object.entries(deltaExport.vdis),
async ([id, vdi]) => {
const path = `${this._vmBackupDir}/${vhds[id]}`

const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
prependDir: true,
})
)
.sort()
.pop()

assert.notStrictEqual(
parentPath,
undefined,
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
)

parentPath = parentPath.slice(1) // remove leading slash

// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}

// don't write it as transferSize += await async function
// since i += await asyncFun lead to race condition
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
@@ -235,6 +219,17 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
writeBlockConcurrency: this._config.writeBlockConcurrency,
})
transferSize += transferSizeOneDisk

if (isDifferencing) {
await chainVhd(handler, parentPath, handler, path)
}

// set the correct UUID in the VHD
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
},
{
concurrency: settings.diskPerVmConcurrency,

@@ -1,4 +1,3 @@
import assert from 'node:assert'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'
@@ -15,7 +14,6 @@ import find from 'lodash/find.js'

export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
assert.notStrictEqual(baseVm, undefined)
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
@@ -38,9 +36,7 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
}
}
}
updateUuidAndChain() {
// nothing to do, the chaining is not modified in this case
}

prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({

@@ -5,10 +5,6 @@ export class AbstractIncrementalWriter extends AbstractWriter {
throw new Error('Not implemented')
}

updateUuidAndChain() {
throw new Error('Not implemented')
}

cleanup() {
throw new Error('Not implemented')
}

@@ -230,7 +230,6 @@ Settings are described in [`@xen-orchestra/backups/\_runners/VmsXapi.mjs``](http
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `updateUuidAndChain({ isVhdDifferencing, vdis })`
- `cleanup()`
- `healthCheck()` // is not executed if no health check sr or tag doesn't match
- **Full**

@@ -282,33 +282,20 @@ export default class Esxi extends EventEmitter {
const networks = []

for (const key of Object.keys(vmx)) {
const matches = key.match(/^(scsi|ide|ethernet)[0-9]+$/)
if (matches === null) {
continue
}
const channelType = matches[1]
if (channelType === 'ide' || channelType === 'scsi') {
const diskChannel = vmx[key]
for (const diskIndex in Object.values(diskChannel)) {
const disk = diskChannel[diskIndex]
if (typeof disk !== 'object') {
if (key.match('^scsi([0-9]+)$') !== null) {
const scsiChannel = vmx[key]
for (const diskIndex in Object.values(scsiChannel)) {
const disk = scsiChannel[diskIndex]
if (typeof disk !== 'object' || disk.deviceType !== 'scsi-hardDisk') {
continue
}
// can be something other than a disk, like a controller card
if (channelType === 'scsi' && disk.deviceType !== 'scsi-hardDisk') {
continue
}
// ide hard disk don't have deviceType, but cdroms have one
if (channelType === 'ide' && disk.deviceType === 'atapi-cdrom') {
continue
}

disks.push({
...(await this.#inspectVmdk(dataStores, dataStore, dirname(vmxPath), disk.fileName)),
node: `${key}:${diskIndex}`,
})
}
} else if (channelType === 'ethernet') {
}
if (key.match('^ethernet([0-9]+)$') !== null) {
const ethernet = vmx[key]

networks.push({
@@ -328,7 +315,7 @@ export default class Esxi extends EventEmitter {
for (const diskIndex in snapshot.disks) {
const fileName = snapshot.disks[diskIndex].fileName
snapshot.disks[diskIndex] = {
node: snapshot.disks[diskIndex]?.node, // 'scsi0:0' , 'ide0:0', ...,
node: snapshot.disks[diskIndex]?.node, // 'scsi0:0',
...(await this.#inspectVmdk(dataStores, dataStore, dirname(vmxPath), fileName)),
}
}

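For context, a hedged illustration of the `vmx` object shape the loop above walks over: the keys follow the `scsiN`/`ideN`/`ethernetN` pattern matched by the regular expression, but the concrete sub-keys and values below are representative placeholders, not taken from a real .vmx file:

```js
const vmx = {
  scsi0: {
    0: { deviceType: 'scsi-hardDisk', fileName: 'disk-0.vmdk' }, // kept as a disk
    1: { deviceType: 'scsi-cdrom', fileName: 'cd.iso' }, // skipped: not a hard disk
  },
  ide0: {
    0: { fileName: 'disk-1.vmdk' }, // IDE hard disks carry no deviceType: kept
    1: { deviceType: 'atapi-cdrom' }, // skipped
  },
  ethernet0: { virtualDev: 'vmxnet3' }, // handled by the ethernet branch
}
```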
@@ -12,7 +12,7 @@
- [REST API] Export host [SMT](https://en.wikipedia.org/wiki/Simultaneous_multithreading) status at `/hosts/:id/smt` [Forum#71374](https://xcp-ng.org/forum/post/71374)
- [Home & REST API] `$container` field of an halted VM now points to a host if a VDI is on a local storage [Forum#71769](https://xcp-ng.org/forum/post/71769)
- [Size Input] Ability to select two new units in the dropdown (`TiB`, `PiB`) (PR [#7382](https://github.com/vatesfr/xen-orchestra/pull/7382))

- [Backup] Ability to set a number of retries for VM backup failures [#2139](https://github.com/vatesfr/xen-orchestra/issues/2139) (PR [#7308](https://github.com/vatesfr/xen-orchestra/pull/7308))

### Bug fixes

@@ -22,8 +22,6 @@
- [Plugins/audit] Don't log `tag.getAllConfigured` calls
- [Remotes] Correctly clear error when the remote is tested with success
- [Import/VMWare] Fix importing last snapshot (PR [#7370](https://github.com/vatesfr/xen-orchestra/pull/7370))
- [Host/Reboot] Fix false positive warning when restarting an host after updates (PR [#7366](https://github.com/vatesfr/xen-orchestra/pull/7366))
- [Import/VMWare] Handle VMs with IDE disks

### Packages to release

@@ -43,7 +41,6 @@

- @xen-orchestra/backups patch
- @xen-orchestra/fs patch
- @xen-orchestra/vmware-explorer patch
- @xen-orchestra/xapi patch
- vhd-lib patch
- xo-server minor

@@ -46,9 +46,9 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}

get compressionType() {
const compressionType = this.#vhds[0].compressionType
for (let i = 0; i < this.#vhds.length; i++) {
if (compressionType !== this.#vhds[i].compressionType) {
const compressionType = this.vhds[0].compressionType
for (let i = 0; i < this.vhds.length; i++) {
if (compressionType !== this.vhds[i].compressionType) {
return 'MIXED'
}
}

@@ -27,6 +27,11 @@ const SCHEMA_SETTINGS = {
minimum: 1,
optional: true,
},
nRetriesVmBackupFailures: {
minimum: 0,
optional: true,
type: 'number',
},
preferNbd: {
type: 'boolean',
optional: true,

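With the schema entry above, a backup job can carry the new retry count in its global settings. A hedged example of what such a settings object might look like — `nRetriesVmBackupFailures` and the other field names come from the defaults shown earlier in this diff, while the overall shape (an empty-string key holding the job-wide defaults) is an assumption:

```js
const settings = {
  // '' holds the job-wide defaults; per-schedule or per-VM keys may override them
  '': {
    concurrency: 2,
    nRetriesVmBackupFailures: 3, // retry each failing VM up to 3 more times
    timeout: 0,
  },
}
```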
@@ -1,4 +1,3 @@
import semver from 'semver'
import { createLogger } from '@xen-orchestra/log'
import assert from 'assert'
import { format } from 'json-rpc-peer'
@@ -137,38 +136,13 @@ export async function restart({
const pool = this.getObject(host.$poolId, 'pool')
const master = this.getObject(pool.master, 'host')
const hostRebootRequired = host.rebootRequired

// we are currently in an host upgrade process
if (hostRebootRequired && host.id !== master.id) {
// this error is not ideal but it means that the pool master must be fully upgraded/rebooted before the current host can be rebooted.
//
// there is a single error for the 3 cases because the client must handle them the same way
const throwError = () =>
incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})

if (semver.lt(master.version, host.version)) {
log.error(`master version (${master.version}) is older than the host version (${host.version})`, {
masterId: master.id,
hostId: host.id,
})
throwError()
}

if (semver.eq(master.version, host.version)) {
if ((await this.getXapi(host).listMissingPatches(master._xapiId)).length > 0) {
log.error('master has missing patches', { masterId: master.id })
throwError()
}
if (master.rebootRequired) {
log.error('master needs to reboot')
throwError()
}
}
if (hostRebootRequired && host.id !== master.id && host.version === master.version) {
throw incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})
}
}

@@ -141,6 +141,7 @@ const messages = {
removeColor: 'Remove color',
xcpNg: 'XCP-ng',
noFileSelected: 'No file selected',
nRetriesVmBackupFailures: 'Number of retries if VM backup fails',

// ----- Modals -----
alertOk: 'OK',

@@ -54,9 +54,13 @@ export default class IsoDevice extends Component {
() => this.props.vm.$pool,
() => this.props.vm.$container,
(vmPool, vmContainer) => sr => {
const vmRunning = vmContainer !== vmPool
const sameHost = vmContainer === sr.$container
const samePool = vmPool === sr.$pool

return (
vmPool === sr.$pool &&
(sr.shared || vmContainer === sr.$container) &&
samePool &&
(vmRunning ? sr.shared || sameHost : true) &&
(sr.SR_type === 'iso' || (sr.SR_type === 'udev' && sr.size))
)
}

@@ -189,6 +189,7 @@ const getInitialState = ({ preSelectedVmIds, setHomeVmIdsSelection, suggestedExc
drMode: false,
name: '',
nbdConcurrency: 1,
nRetriesVmBackupFailures: 0,
preferNbd: false,
remotes: [],
schedules: {},
@@ -635,6 +636,11 @@ const New = decorate([
nbdConcurrency,
})
},
setNRetriesVmBackupFailures({ setGlobalSettings }, nRetries) {
setGlobalSettings({
nRetriesVmBackupFailures: nRetries,
})
},
},
computed: {
compressionId: generateId,
@@ -644,6 +650,7 @@ const New = decorate([
inputMaxExportRate: generateId,
inputPreferNbd: generateId,
inputNbdConcurrency: generateId,
inputNRetriesVmBackupFailures: generateId,
inputTimeoutId: generateId,

// In order to keep the user preference, the offline backup is kept in the DB
@@ -756,6 +763,7 @@ const New = decorate([
fullInterval,
maxExportRate,
nbdConcurrency = 1,
nRetriesVmBackupFailures = 0,
offlineBackup,
offlineSnapshot,
preferNbd,
@@ -990,6 +998,17 @@ const New = decorate([
value={concurrency}
/>
</FormGroup>
<FormGroup>
<label htmlFor={state.inputNRetriesVmBackupFailures}>
<strong>{_('nRetriesVmBackupFailures')}</strong>
</label>
<Number
id={state.inputNRetriesVmBackupFailures}
min={0}
onChange={effects.setNRetriesVmBackupFailures}
value={nRetriesVmBackupFailures}
/>
</FormGroup>
<FormGroup>
<label htmlFor={state.inputTimeoutId}>
<strong>{_('timeout')}</strong>

@@ -124,6 +124,8 @@ const NewMirrorBackup = decorate([
setAdvancedSettings({ timeout: timeout !== undefined ? timeout * 3600e3 : undefined }),
setMaxExportRate: ({ setAdvancedSettings }, rate) =>
setAdvancedSettings({ maxExportRate: rate !== undefined ? rate * (1024 * 1024) : undefined }),
setNRetriesVmBackupFailures: ({ setAdvancedSettings }, nRetriesVmBackupFailures) =>
setAdvancedSettings({ nRetriesVmBackupFailures }),
setSourceRemote: (_, obj) => () => ({
sourceRemote: obj === null ? {} : obj.value,
}),
@@ -204,6 +206,7 @@ const NewMirrorBackup = decorate([
inputConcurrencyId: generateId,
inputTimeoutId: generateId,
inputMaxExportRateId: generateId,
inputNRetriesVmBackupFailures: generateId,
isBackupInvalid: state =>
state.isMissingName || state.isMissingBackupMode || state.isMissingSchedules || state.isMissingRetention,
isFull: state => state.mode === 'full',
@@ -231,7 +234,7 @@ const NewMirrorBackup = decorate([
}),
injectState,
({ state, effects, intl: { formatMessage } }) => {
const { concurrency, timeout, maxExportRate } = state.advancedSettings
const { concurrency, timeout, maxExportRate, nRetriesVmBackupFailures = 0 } = state.advancedSettings
return (
<form id={state.formId}>
<Container>
@@ -314,6 +317,17 @@ const NewMirrorBackup = decorate([
value={concurrency}
/>
</FormGroup>
<FormGroup>
<label htmlFor={state.inputNRetriesVmBackupFailures}>
<strong>{_('nRetriesVmBackupFailures')}</strong>
</label>
<Number
id={state.inputNRetriesVmBackupFailures}
min={0}
onChange={effects.setNRetriesVmBackupFailures}
value={nRetriesVmBackupFailures}
/>
</FormGroup>
<FormGroup>
<label htmlFor={state.inputTimeoutId}>
<strong>{_('timeout')}</strong>

@@ -319,6 +319,7 @@ class JobsTable extends React.Component {
compression,
concurrency,
fullInterval,
nRetriesVmBackupFailures,
offlineBackup,
offlineSnapshot,
proxyId,
@@ -349,6 +350,9 @@ class JobsTable extends React.Component {
{compression !== undefined && (
<Li>{_.keyValue(_('compression'), compression === 'native' ? 'GZIP' : compression)}</Li>
)}
{nRetriesVmBackupFailures > 0 && (
<Li>{_.keyValue(_('nRetriesVmBackupFailures'), nRetriesVmBackupFailures)}</Li>
)}
</Ul>
)
},

@@ -3,6 +3,7 @@ import _ from 'intl'
import Copiable from 'copiable'
import decorate from 'apply-decorators'
import Icon from 'icon'
import map from 'lodash/map'
import React from 'react'
import store from 'store'
import HomeTags from 'home-tags'
@@ -23,21 +24,10 @@ export default decorate([
provideState({
computed: {
areHostsVersionsEqual: ({ areHostsVersionsEqualByPool }, { host }) => areHostsVersionsEqualByPool[host.$pool],
inMemoryVms: (_, { vms }) => {
const result = []
for (const key of Object.keys(vms)) {
const vm = vms[key]
const { power_state } = vm
if (power_state === 'Running' || power_state === 'Paused') {
result.push(vm)
}
}
return result
},
},
}),
injectState,
({ statsOverview, host, nVms, vmController, state: { areHostsVersionsEqual, inMemoryVms } }) => {
({ statsOverview, host, nVms, vmController, vms, state: { areHostsVersionsEqual } }) => {
const pool = getObject(store.getState(), host.$pool)
const vmsFilter = encodeURIComponent(new CM.Property('$container', new CM.String(host.id)).toString())
return (
@@ -130,7 +120,7 @@ export default decorate([
tooltip={`${host.productBrand} (${formatSize(vmController.memory.size)})`}
value={vmController.memory.size}
/>
{inMemoryVms.map(vm => (
{map(vms, vm => (
<UsageElement
tooltip={`${vm.name_label} (${formatSize(vm.memory.size)})`}
key={vm.id}
