Compare commits


1 Commit

Author: Florent Beauchamp | SHA1: 0e6b0ae06d | Message: refactor(xo-server): split migrate vm from esxi | Date: 2024-01-12 15:26:11 +00:00
43 changed files with 467 additions and 417 deletions

View File

@@ -41,7 +41,9 @@ export default class MultiNbdClient {
}
if (connectedClients.length < this.#clients.length) {
warn(
`incomplete connection by multi Nbd, only ${connectedClients.length} over ${this.#clients.length} expected clients`
`incomplete connection by multi Nbd, only ${connectedClients.length} over ${
this.#clients.length
} expected clients`
)
this.#clients = connectedClients
}

View File

@@ -1,4 +1,4 @@
import { Task } from '@vates/task'
import { Task } from './Task.mjs'
export class HealthCheckVmBackup {
#restoredVm
@@ -14,7 +14,7 @@ export class HealthCheckVmBackup {
async run() {
return Task.run(
{
properties: { name: 'vmstart' },
name: 'vmstart',
},
async () => {
let restoredVm = this.#restoredVm

View File

@@ -1,8 +1,8 @@
import assert from 'node:assert'
import { Task } from '@vates/task'
import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
import { VhdNegative, VhdSynthetic } from 'vhd-lib'
import { decorateClass } from '@vates/decorate-with'
@@ -191,7 +191,7 @@ export class ImportVmBackup {
async #decorateIncrementalVmMetadata() {
const { additionnalVmTag, mapVdisSrs, useDifferentialRestore } = this._importIncrementalVmSettings
const ignoredVdis = new Set(
Object.entries(mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
@@ -240,7 +240,7 @@ export class ImportVmBackup {
return Task.run(
{
properties: { name: 'transfer' },
name: 'transfer',
},
async () => {
const xapi = this._xapi

View File

@@ -21,7 +21,7 @@ export class RestoreMetadataBackup {
})
} else {
const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
const dataFileName = resolve('/', backupId, metadata.data ?? 'data.json').slice(1)
const dataFileName = resolve(backupId, metadata.data ?? 'data.json')
const data = await handler.readFile(dataFileName)
// if data is JSON, send it as a plain string; otherwise, consider the data as binary and encode it

View File

@@ -0,0 +1,155 @@
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
}
const noop = Function.prototype
const serializeErrors = errors => (Array.isArray(errors) ? errors.map(serializeError) : errors)
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
const serializeError = error =>
error instanceof Error
? {
...error, // Copy enumerable properties.
code: error.code,
errors: serializeErrors(error.errors), // supports AggregateError
message: error.message,
name: error.name,
stack: error.stack,
}
: error
const $$task = Symbol('@xen-orchestra/backups/Task')
export class Task {
static get cancelToken() {
const task = Zone.current.data[$$task]
return task !== undefined ? task.#cancelToken : CancelToken.none
}
static run(opts, fn) {
return new this(opts).run(fn, true)
}
static wrapFn(opts, fn) {
// compatibility with @decorateWith
if (typeof fn !== 'function') {
;[fn, opts] = [opts, fn]
}
return function () {
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
}
}
#cancelToken
#id = Math.random().toString(36).slice(2)
#onLog
#zone
constructor({ name, data, onLog }) {
let parentCancelToken, parentId
if (onLog === undefined) {
const parent = Zone.current.data[$$task]
if (parent === undefined) {
onLog = noop
} else {
onLog = log => parent.#onLog(log)
parentCancelToken = parent.#cancelToken
parentId = parent.#id
}
}
const zone = Zone.current.fork('@xen-orchestra/backups/Task')
zone.data[$$task] = this
this.#zone = zone
const { cancel, token } = CancelToken.source(parentCancelToken && [parentCancelToken])
this.#cancelToken = token
this.cancel = cancel
this.#onLog = onLog
this.#log('start', {
data,
message: name,
parentId,
})
}
failure(error) {
this.#end('failure', serializeError(error))
}
info(message, data) {
this.#log('info', { data, message })
}
/**
* Run a function in the context of this task
*
* In case of error, the task will be failed.
*
* @typedef Result
* @param {() => Result} fn
* @param {boolean} last - Whether the task should succeed if there is no error
* @returns Result
*/
run(fn, last = false) {
return this.#zone.run(() => {
try {
const result = fn()
let then
if (result != null && typeof (then = result.then) === 'function') {
then.call(result, last && (value => this.success(value)), error => this.failure(error))
} else if (last) {
this.success(result)
}
return result
} catch (error) {
this.failure(error)
throw error
}
})
}
success(value) {
this.#end('success', value)
}
warning(message, data) {
this.#log('warning', { data, message })
}
wrapFn(fn, last) {
const task = this
return function () {
return task.run(() => fn.apply(this, arguments), last)
}
}
#end(status, result) {
this.#log('end', { result, status })
this.#onLog = logAfterEnd
}
#log(event, props) {
this.#onLog({
...props,
event,
taskId: this.#id,
timestamp: Date.now(),
})
}
}
for (const method of ['info', 'warning']) {
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
}
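
For context, a minimal usage sketch of the local `Task` class added above (the values are made up, and this snippet is not part of the diff): `Task.run` accepts `{ name, data, onLog }`, executes the function inside a dedicated zone, and reports `start`, `info`, `warning` and `end` events through `onLog`; a nested task created without its own `onLog` forwards its events to its parent.

```js
// Minimal sketch (assumed usage): collect the log events of a named task.
import { Task } from '@xen-orchestra/backups/Task.mjs'

const logs = []
await Task.run(
  {
    name: 'backup run',
    data: { jobId: 'job-1' }, // hypothetical payload, attached to the `start` event
    onLog: log => logs.push(log), // each log has `event`, `taskId`, `timestamp`, plus event-specific fields
  },
  async () => {
    Task.info('transferring') // static helpers forward to the task of the current zone
    // a nested task without an explicit `onLog` reports to its parent
    await Task.run({ name: 'transfer' }, async () => 42)
  }
)
// `logs` now contains the `start`, `info` and `end` events of both tasks
```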

View File

@@ -11,10 +11,10 @@ import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Task } from '@vates/task'
import { Xapi } from '@xen-orchestra/xapi'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'
createCachedLookup().patchGlobal()
@@ -154,8 +154,8 @@ process.on('message', async message => {
const result = message.runWithLogs
? await Task.run(
{
properties: { name: 'backup run' },
onProgress: data =>
name: 'backup run',
onLog: data =>
emitMessage({
data,
type: 'log',

View File

@@ -36,34 +36,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
logInfo(`merging VHD chain`, { chain })
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
logInfo(`merging VHD chain`, { chain })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
} finally {
clearInterval(handle)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
@@ -471,23 +469,20 @@ export async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, {
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should share the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should share the same metadata file
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
})
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ properties: { name: 'merge' } }, doMerge) : () => Promise.resolve()),
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
if (remove) {
@@ -509,12 +504,11 @@ export async function cleanVm(
// update size for delta metadata with merged VHDs
// for the others, check that the size matches the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = backups.get(metadataPath)
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const mergedSize = metadataWithMergedVhd[metadataPath]
const { mode, size, vhds, xva } = metadata
@@ -524,26 +518,29 @@ export async function cleanVm(
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
if (fileSystemSize !== size && fileSystemSize !== undefined) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
console.warn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
if (mergedSize === undefined) {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize !== undefined && fileSystemSize !== size) {
logWarn('cleanVm: incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
}
} catch (error) {
@@ -551,9 +548,19 @@ export async function cleanVm(
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
// systematically update size and differentials after a merge
// @todo: after 2024-04-01, remove the fixMetadata option since the size computation is fixed
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
metadata.size = mergedSize ?? fileSystemSize ?? size
if (mergedSize) {
// all disks are now key disks
metadata.isVhdDifferencing = {}
for (const id of Object.values(metadata.vdis ?? {})) {
metadata.isVhdDifferencing[`${id}.vhd`] = false
}
}
mustRegenerateCache = true
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })

View File

@@ -6,9 +6,9 @@ import { CancelToken } from 'promise-toolbox'
import { compareVersions } from 'compare-versions'
import { createVhdStreamWithLength } from 'vhd-lib'
import { defer } from 'golike-defer'
import { Task } from '@vates/task'
import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'
// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM

View File

@@ -1,5 +1,4 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { Task } from '@vates/task'
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
@@ -7,6 +6,7 @@ import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
const DEFAULT_METADATA_SETTINGS = {
@@ -14,8 +14,6 @@ const DEFAULT_METADATA_SETTINGS = {
retentionXoMetadata: 0,
}
const noop = Function.prototype
export const Metadata = class MetadataBackupRunner extends Abstract {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
@@ -57,16 +55,13 @@ export const Metadata = class MetadataBackupRunner extends Abstract {
poolIds.map(id =>
this._getRecord('pool', id).catch(error => {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
new Task(
runTask(
{
properties: {
id,
name: 'get pool record',
type: 'pool',
},
name: 'get pool record',
data: { type: 'pool', id },
},
() => Promise.reject(error)
).catch(noop)
)
})
)
),
@@ -86,11 +81,11 @@ export const Metadata = class MetadataBackupRunner extends Abstract {
if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
promises.push(
asyncMap(pools, async pool =>
new Task(
runTask(
{
properties: {
name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
data: {
id: pool.$id,
name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
pool,
poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
type: 'pool',
@@ -105,17 +100,17 @@ export const Metadata = class MetadataBackupRunner extends Abstract {
schedule,
settings,
}).run()
).catch(noop)
)
)
)
}
if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
promises.push(
new Task(
runTask(
{
properties: {
name: `Starting XO metadata backup. (${job.id})`,
name: `Starting XO metadata backup. (${job.id})`,
data: {
type: 'xo',
},
},
@@ -127,7 +122,7 @@ export const Metadata = class MetadataBackupRunner extends Abstract {
schedule,
settings,
}).run()
).catch(noop)
)
)
}
await Promise.all(promises)

View File

@@ -1,11 +1,12 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { Task } from '@vates/task'
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'
@@ -24,8 +25,6 @@ const DEFAULT_REMOTE_VM_SETTINGS = {
vmTimeout: 0,
}
const noop = Function.prototype
export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
@@ -64,13 +63,7 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
const baseSettings = this._baseSettings
const handleVm = vmUuid => {
const taskStart = {
properties: {
id: vmUuid,
name: 'backup VM',
type: 'VM',
},
}
const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }
const opts = {
baseSettings,
@@ -93,7 +86,7 @@ export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
}
return new Task(taskStart, () => vmBackup.run()).catch(noop)
return runTask(taskStart, () => vmBackup.run())
}
const { concurrency } = settings
await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))

View File

@@ -1,11 +1,12 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { Task } from '@vates/task'
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'
@@ -33,8 +34,6 @@ const DEFAULT_XAPI_VM_SETTINGS = {
vmTimeout: 0,
}
const noop = Function.prototype
export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
_computeBaseSettings(config, job) {
const baseSettings = { ...DEFAULT_SETTINGS }
@@ -58,16 +57,13 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
this._getRecord('SR', id).catch(error => {
new Task(
runTask(
{
properties: {
id,
name: 'get SR record',
type: 'SR',
},
name: 'get SR record',
data: { type: 'SR', id },
},
() => Promise.reject(error)
).catch(noop)
)
})
)
),
@@ -94,19 +90,13 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
const baseSettings = this._baseSettings
const handleVm = vmUuid => {
const taskStart = {
properties: {
id: vmUuid,
name: 'backup VM',
type: 'VM',
},
}
const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }
return this._getRecord('VM', vmUuid).then(
disposableVm =>
Disposable.use(disposableVm, vm => {
taskStart.data.name_label = vm.name_label
return new Task()(taskStart, () => {
return runTask(taskStart, () => {
const opts = {
baseSettings,
config,
@@ -131,12 +121,12 @@ export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
}
}
return vmBackup.run()
}).catch(noop)
})
}),
error =>
new Task(taskStart, () => {
runTask(taskStart, () => {
throw error
}).catch(noop)
})
)
}
const { concurrency } = settings

View File

@@ -1,12 +1,9 @@
import Disposable from 'promise-toolbox/Disposable'
import pTimeout from 'promise-toolbox/timeout'
import { compileTemplate } from '@xen-orchestra/template'
import { Task } from '@vates/task'
import { runTask } from './_runTask.mjs'
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'
const noop = Function.prototype
export const DEFAULT_SETTINGS = {
getRemoteTimeout: 300e3,
reportWhen: 'failure',
@@ -39,16 +36,13 @@ export const Abstract = class AbstractRunner {
})
} catch (error) {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
Task.run(
runTask(
{
properties: {
id: remoteId,
name: 'get remote adapter',
type: 'remote',
},
name: 'get remote adapter',
data: { type: 'remote', id: remoteId },
},
() => Promise.reject(error)
).catch(noop)
)
}
}
}

View File

@@ -1,9 +1,9 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { Task } from '@vates/task'
import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
export const PATH_DB_DUMP = '/pool/xmldbdump'
@@ -54,8 +54,8 @@ export class PoolMetadataBackup {
([remoteId, adapter]) =>
Task.run(
{
properties: {
name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
data: {
id: remoteId,
type: 'remote',
},

View File

@@ -1,9 +1,9 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { join } from '@xen-orchestra/fs/path'
import { Task } from '@vates/task'
import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
import { formatFilenameDate } from '../_filenameDate.mjs'
import { Task } from '../Task.mjs'
export class XoMetadataBackup {
constructor({ config, job, remoteAdapters, schedule, settings }) {
@@ -51,8 +51,8 @@ export class XoMetadataBackup {
([remoteId, adapter]) =>
Task.run(
{
properties: {
name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
data: {
id: remoteId,
type: 'remote',
},

View File

@@ -0,0 +1,5 @@
import { Task } from '../Task.mjs'
const noop = Function.prototype
export const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
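
A hedged illustration of how the runners are expected to call this helper (the SR id below is made up): the sub-task failure is recorded through the task logs while the returned promise resolves to `undefined`, so call sites no longer need their own `.catch(noop)`.

```js
// Sketch (assumed usage): log a failure without rejecting the caller.
import { runTask } from './_runTask.mjs'

await runTask(
  { name: 'get SR record', data: { type: 'SR', id: 'sr-uuid' } }, // hypothetical id
  () => Promise.reject(new Error('SR not found'))
)
// resolves to undefined; the error is only visible in the task logs
```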

View File

@@ -1,11 +1,10 @@
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { Task } from '@vates/task'
import { AbstractRemote } from './_AbstractRemote.mjs'
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
import { Task } from '../../Task.mjs'
export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {

View File

@@ -1,7 +1,6 @@
import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { Task } from '@vates/task'
import assert from 'node:assert'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'
@@ -9,6 +8,7 @@ import mapValues from 'lodash/mapValues.js'
import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {

View File

@@ -2,7 +2,6 @@ import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { pipeline } from 'node:stream'
import { Task } from '@vates/task'
import findLast from 'lodash/findLast.js'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import keyBy from 'lodash/keyBy.js'
@@ -14,6 +13,7 @@ import { exportIncrementalVm } from '../../_incrementalVm.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { IncrementalXapiWriter } from '../_writers/IncrementalXapiWriter.mjs'
import { Task } from '../../Task.mjs'
import { watchStreamSize } from '../../_watchStreamSize.mjs'
const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')

View File

@@ -1,6 +1,6 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { Task } from '@vates/task'
import { Task } from '../../Task.mjs'
const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')
@@ -80,7 +80,7 @@ export const Abstract = class AbstractVmBackupRunner {
// create a task so that an entry appears in the logs and reports
return Task.run(
{
properties: { name: 'health check' },
name: 'health check',
},
() => {
Task.info(`This VM doesn't match the health check's tags for this schedule`)

View File

@@ -5,9 +5,9 @@ import { asyncMap } from '@xen-orchestra/async-map'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { formatDateTime } from '@xen-orchestra/xapi'
import { Task } from '@vates/task'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { Abstract } from './_Abstract.mjs'
export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
@@ -142,7 +142,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
const settings = this._settings
if (this._mustDoSnapshot()) {
await Task.run({ properties: { name: 'snapshot' } }, async () => {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
await vm.$assertHealthyVdiChains()
}

View File

@@ -1,7 +1,6 @@
import { Task } from '@vates/task'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
@@ -10,10 +9,10 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
constructor(props) {
super(props)
this.run = Task.wrap(
this.run = Task.wrapFn(
{
properties: {
name: 'export',
name: 'export',
data: {
id: props.remoteId,
type: 'remote',
@@ -64,7 +63,7 @@ export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
await deleteOldBackups()
}
await Task.run({ properties: { name: 'transfer' } }, async () => {
await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
maxStreamLength,
streamLength,

View File

@@ -1,10 +1,10 @@
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { formatDateTime } from '@xen-orchestra/xapi'
import { Task } from '@vates/task'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
@@ -14,10 +14,10 @@ export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
constructor(props) {
super(props)
this.run = Task.wrap(
this.run = Task.wrapFn(
{
properties: {
name: 'export',
name: 'export',
data: {
id: props.sr.uuid,
name_label: this._sr.name_label,
type: 'SR',
@@ -52,7 +52,7 @@ export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
}
let targetVmRef
await Task.run({ properties: { name: 'transfer' } }, async () => {
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await xapi.VM_import(stream, sr.$ref, vm =>
Promise.all([
!_warmMigration && vm.add_tags('Disaster Recovery'),

View File

@@ -8,11 +8,11 @@ import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname } from 'node:path'
import { Task } from '@vates/task'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
@@ -71,17 +71,17 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
properties: {
name: 'export',
name: 'export',
data: {
id: this._remoteId,
isFull,
type: 'remote',
},
})
this.transfer = task.wrapInside(this.transfer)
this.healthCheck = task.wrapInside(this.healthCheck)
this.cleanup = task.wrapInside(this.cleanup)
this.afterBackup = task.wrap(this.afterBackup)
this.transfer = task.wrapFn(this.transfer)
this.healthCheck = task.wrapFn(this.healthCheck)
this.cleanup = task.wrapFn(this.cleanup)
this.afterBackup = task.wrapFn(this.afterBackup, true)
return task.run(() => this._prepare())
}
@@ -174,7 +174,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vm,
vmSnapshot,
}
const { size } = await Task.run({ properties: { name: 'transfer' } }, async () => {
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await asyncEach(
Object.entries(deltaExport.vdis),
@@ -205,7 +205,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
// don't write it as `transferSize += await asyncFunction()`
// since `i += await asyncFn()` leads to a race condition,
// as explained in https://eslint.org/docs/latest/rules/require-atomic-updates
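
The race mentioned in the comment above, restated as a generic sketch that is not code from this repository: `x += await f()` reads `x` before awaiting, so concurrent iterations can overwrite each other's update.

```js
// Generic sketch of the require-atomic-updates hazard (assumed example).
let transferSize = 0
await Promise.all(
  [10, 20].map(async size => {
    // BAD: `transferSize += await Promise.resolve(size)` reads transferSize
    // before the await, so one of the two updates can be lost.
    const chunk = await Promise.resolve(size)
    transferSize += chunk // OK: the read-modify-write happens in one synchronous step
  })
)
// transferSize === 30 with the safe form above
```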

View File

@@ -1,11 +1,11 @@
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'
import { Task } from '@vates/task'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { importIncrementalVm, TAG_BACKUP_SR, TAG_BASE_DELTA, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
@@ -40,21 +40,18 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
properties: {
name: 'export',
name: 'export',
data: {
id: this._sr.uuid,
isFull,
name_label: this._sr.name_label,
type: 'SR',
},
})
this.transfer = task.wrapInside(this.transfer)
if (this._healthCheckSr !== undefined) {
this.cleanup = task.wrapInside(this.cleanup)
this.healthCheck = task.wrap(this.healthCheck)
} else {
this.cleanup = task.wrap(this.cleanup)
}
const hasHealthCheckSr = this._healthCheckSr !== undefined
this.transfer = task.wrapFn(this.transfer)
this.cleanup = task.wrapFn(this.cleanup, !hasHealthCheckSr)
this.healthCheck = task.wrapFn(this.healthCheck, hasHealthCheckSr)
return task.run(() => this._prepare())
}
@@ -142,7 +139,7 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
const { uuid: srUuid, $xapi: xapi } = sr
let targetVmRef
await Task.run({ properties: { name: 'transfer' } }, async () => {
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await importIncrementalVm(this.#decorateVmMetadata(deltaExport), sr)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),

View File

@@ -1,12 +1,12 @@
import { createLogger } from '@xen-orchestra/log'
import { join } from 'node:path'
import { Task } from '@vates/task'
import assert from 'node:assert'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { ImportVmBackup } from '../../ImportVmBackup.mjs'
import { Task } from '../../Task.mjs'
import * as MergeWorker from '../../merge-worker/index.mjs'
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
@@ -26,7 +26,7 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
async _cleanVm(options) {
try {
return await Task.run({ properties: { name: 'clean-vm' } }, () => {
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this._vmBackupDir, {
...options,
fixMetadata: true,
@@ -84,7 +84,7 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
)
return Task.run(
{
properties: { name: 'health check' },
name: 'health check',
},
async () => {
const xapi = sr.$xapi

View File

@@ -1,8 +1,8 @@
import { extractOpaqueRef } from '@xen-orchestra/xapi'
import { Task } from '@vates/task'
import assert from 'node:assert/strict'
import { HealthCheckVmBackup } from '../../HealthCheckVmBackup.mjs'
import { Task } from '../../Task.mjs'
export const MixinXapiWriter = (BaseClass = Object) =>
class MixinXapiWriter extends BaseClass {
@@ -32,7 +32,7 @@ export const MixinXapiWriter = (BaseClass = Object) =>
// copy VM
return Task.run(
{
properties: { name: 'health check' },
name: 'health check',
},
async () => {
const { $xapi: xapi } = sr
@@ -42,7 +42,7 @@ export const MixinXapiWriter = (BaseClass = Object) =>
if (await this.#isAlreadyOnHealthCheckSr(baseVm)) {
healthCheckVmRef = await Task.run(
{ properties: { name: 'cloning-vm' } },
{ name: 'cloning-vm' },
async () =>
await xapi
.callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
@@ -50,7 +50,7 @@ export const MixinXapiWriter = (BaseClass = Object) =>
)
} else {
healthCheckVmRef = await Task.run(
{ properties: { name: 'copying-vm' } },
{ name: 'copying-vm' },
async () =>
await xapi
.callAsync('VM.copy', this._targetVmRef, `Health Check - ${baseVm.name_label}`, sr.$ref)

View File

@@ -27,7 +27,6 @@
"@vates/fuse-vhd": "^2.0.0",
"@vates/nbd-client": "^3.0.0",
"@vates/parse-duration": "^0.1.1",
"@vates/task": "^0.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^4.1.3",
"@xen-orchestra/log": "^0.6.0",

View File

@@ -15,7 +15,7 @@ import { Readable } from 'stream'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup.mjs'
import { runBackupWorker } from '@xen-orchestra/backups/runBackupWorker.mjs'
import { Task } from '@vates/task'
import { Task } from '@xen-orchestra/backups/Task.mjs'
import { Xapi } from '@xen-orchestra/xapi'
const noop = Function.prototype
@@ -122,15 +122,15 @@ export default class Backups {
try {
await Task.run(
{
properties: {
name: 'backup run',
data: {
jobId: job.id,
jobName: job.name,
mode: job.mode,
name: 'backup run',
reportWhen: job.settings['']?.reportWhen,
scheduleId: schedule.id,
},
onProgress: onLog,
onLog,
},
() => run(params)
)
@@ -205,14 +205,14 @@ export default class Backups {
async (args, onLog) =>
Task.run(
{
properties: {
data: {
backupId,
jobId: metadata.jobId,
name: 'restore',
srId: srUuid,
time: metadata.timestamp,
},
onProgress: onLog,
name: 'restore',
onLog,
},
run
).catch(() => {}), // errors are handled by logs,
@@ -344,14 +344,12 @@ export default class Backups {
({ backupId, remote, xapi: xapiOptions }) =>
Disposable.use(app.remotes.getHandler(remote), xapiOptions && this.getXapi(xapiOptions), (handler, xapi) =>
runWithLogs(
async (args, onProgress) =>
async (args, onLog) =>
Task.run(
{
properties: {
metadata: JSON.parse(String(await handler.readFile(`${backupId}/metadata.json`))),
name: 'metadataRestore',
},
onProgress,
name: 'metadataRestore',
data: JSON.parse(String(await handler.readFile(`${backupId}/metadata.json`))),
onLog,
},
() =>
new RestoreMetadataBackup({

View File

@@ -31,7 +31,6 @@
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.5",
"@vates/task": "^0.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.44.3",
"@xen-orchestra/fs": "^4.1.3",

View File

@@ -1,5 +1,4 @@
export { default as host } from './host.mjs'
export { default as pool } from './pool.mjs'
export { default as SR } from './sr.mjs'
export { default as task } from './task.mjs'
export { default as VBD } from './vbd.mjs'

View File

@@ -26,7 +26,6 @@
"@vates/async-each": "^1.0.0",
"@vates/decorate-with": "^2.0.0",
"@vates/nbd-client": "^3.0.0",
"@vates/task": "^0.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.6.0",
"d3-time-format": "^4.1.0",

View File

@@ -1,106 +0,0 @@
import { asyncEach } from '@vates/async-each'
import { createLogger } from '@xen-orchestra/log'
import { Task } from '@vates/task'
import { getCurrentVmUuid } from './_XenStore.mjs'
const noop = Function.prototype
async function pCatch(p, code, cb) {
try {
return await p
} catch (error) {
if (error.code === code) {
return cb(error)
}
throw error
}
}
const { warn } = createLogger('xo:xapi:pool')
export default class Pool {
async emergencyShutdown() {
const poolMasterRef = this.pool.master
let currentVmRef
try {
currentVmRef = await this.call('VM.get_by_uuid', await getCurrentVmUuid())
// try to move the current VM to the pool master
const hostRef = await this.call('VM.get_resident_on', currentVmRef)
if (hostRef !== poolMasterRef) {
await Task.run(
{
properties: {
name: 'Migrating current VM to pool master',
currentVm: { $ref: currentVmRef },
poolMaster: { $ref: poolMasterRef },
},
},
async () => {
await this.callAsync('VM.pool_migrate', currentVmRef, poolMasterRef, {})
}
).catch(noop)
}
} catch (error) {
warn(error)
}
await pCatch(this.call('pool.disable_ha'), 'HA_NOT_ENABLED', noop)
const hostRefs = await this.call('host.get_all')
// disable all hosts and suspend all VMs
await asyncEach(hostRefs, async hostRef => {
await this.call('host.disable', hostRef).catch(warn)
const [controlDomainRef, vmRefs] = await Promise.all([
this.call('host.get_control_domain', hostRef),
this.call('host.get_resident_VMs', hostRef),
])
await asyncEach(vmRefs, vmRef => {
// never stop the current VM, otherwise the emergencyShutdown process would be interrupted
if (vmRef !== currentVmRef && vmRef !== controlDomainRef) {
return Task.run(
{
properties: {
name: 'suspending VM',
host: { $ref: hostRef },
vm: { $ref: vmRef },
},
},
async () => {
await pCatch(this.callAsync('VM.suspend', vmRef), 'VM_BAD_POWER_STATE', noop)
}
).catch(noop)
}
})
})
const shutdownHost = ref =>
Task.run(
{
properties: {
name: 'shutting down host',
host: { $ref: ref },
},
},
async () => {
await this.callAsync('host.shutdown', ref)
}
).catch(noop)
// shutdown all non-pool master hosts
await asyncEach(hostRefs, hostRef => {
// pool master will be shutdown at the end
if (hostRef !== poolMasterRef) {
return shutdownHost(hostRef)
}
})
// shutdown pool master
await shutdownHost(poolMasterRef)
}
}

View File

@@ -8,8 +8,6 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Settings/Logs] Use GitHub issue form with pre-filled fields when reporting a bug [#7142](https://github.com/vatesfr/xen-orchestra/issues/7142) (PR [#7274](https://github.com/vatesfr/xen-orchestra/pull/7274))
- [REST API] New pool action: `emergency_shutdown`, which suspends all the VMs and then shuts down all the hosts [#7277](https://github.com/vatesfr/xen-orchestra/issues/7277) (PR [#7279](https://github.com/vatesfr/xen-orchestra/pull/7279))
- [Tasks] Hide `/rrd_updates` tasks by default
### Bug fixes
@@ -21,8 +19,6 @@
- [Replication/Health Check] Fix `healthCheckVm.add_tag is not a function` error [Forum#69156](https://xcp-ng.org/forum/post/69156)
- [Plugin/load-balancer] Prevent unwanted migrations to hosts with low free memory (PR [#7288](https://github.com/vatesfr/xen-orchestra/pull/7288))
- Avoid unnecessary `pool.add_to_other_config: Duplicate key` error in XAPI log [Forum#68761](https://xcp-ng.org/forum/post/68761)
- [Jobs] Reset parameters when editing method to avoid invalid parameters on execution [Forum#69299](https://xcp-ng.org/forum/post/69299)
- [Metadata Backup] Fix `ENOENT` error when restoring an _XO Config_ backup [Forum#68999](https://xcp-ng.org/forum/post/68999)
### Packages to release
@@ -41,9 +37,9 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- @xen-orchestra/xapi minor
- @xen-orchestra/xapi patch
- xen-api patch
- xo-server minor
- xo-server patch
- xo-server-load-balancer patch
- xo-web minor

View File

@@ -32,9 +32,7 @@ Then you need to restart the VM.
If you have lost your password to log in to the XOA webpage, you can reset it. From the XOA CLI (for login/access info for the CLI, [see here](xoa.md#first-console-connection)), use the following command and insert the email/account you wish to recover:
```sh
sudo xo-server-recover-account youremail@here.com
```
`xo-server-recover-account youremail@here.com`
It will prompt you to set a new password. If you provide an email here that does not exist in XOA yet, it will create a new account using it, with admin permissions - you can use that new account to log in as well.

View File

@@ -306,7 +306,7 @@ class Merger {
const finalVhdSize = this.#state?.vhdSize ?? 0
const mergedDataSize = this.#state?.mergedDataSize ?? 0
await this.#handler.unlink(this.#statePath).catch(warn)
return { mergedDataSize, finalVhdSize }
return { mergedDataSize, finalVhdSize}
}
}

View File

@@ -40,7 +40,6 @@
"@vates/parse-duration": "^0.1.1",
"@vates/predicates": "^1.1.0",
"@vates/read-chunk": "^1.2.0",
"@vates/task": "^0.2.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.44.3",
"@xen-orchestra/cron": "^1.0.6",

View File

@@ -33,6 +33,7 @@ html
i.fa.fa-sign-in
| Sign in
else
div.mb-2
each label, id in strategies
div: a(href = 'signin/' + id).btn.btn-block.btn-primary.mb-1 Sign in with #{label}
form(action = 'signin/local' method = 'post')

View File

@@ -31,7 +31,6 @@ const AUTHORIZATIONS = {
XVA: STARTER, // @todo handleExport in xen-orchestra/packages/xo-server/src/api/vm.mjs
},
LIST_MISSING_PATCHES: STARTER,
POOL_EMERGENCY_SHUTDOWN: ENTERPRISE,
ROLLING_POOL_UPDATE: ENTERPRISE,
}

View File

@@ -5,7 +5,7 @@ import { createLogger } from '@xen-orchestra/log'
import { createRunner } from '@xen-orchestra/backups/Backup.mjs'
import { parseMetadataBackupId } from '@xen-orchestra/backups/parseMetadataBackupId.mjs'
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup.mjs'
import { Task } from '@vates/task'
import { Task } from '@xen-orchestra/backups/Task.mjs'
import { debounceWithKey, REMOVE_CACHE_ENTRY } from '../_pDebounceWithKey.mjs'
import { handleBackupLog } from '../_handleBackupLog.mjs'
@@ -124,8 +124,8 @@ export default class metadataBackup {
const localTaskIds = { __proto__: null }
return Task.run(
{
properties: { name: 'backup run' },
onProgress: log =>
name: 'backup run',
onLog: log =>
handleBackupLog(log, {
localTaskIds,
logger,

View File

@@ -166,6 +166,129 @@ export default class MigrateVm {
return esxi.getAllVmMetadata()
}
@decorateWith(deferrable)
async _createVdis($defer, { diskChains, sr, xapi, vm }) {
const vdis = {}
for (const [node, chainByNode] of Object.entries(diskChains)) {
const vdi = await xapi._getOrWaitObject(
await xapi.VDI_create({
name_description: 'fromESXI' + chainByNode[0].descriptionLabel,
name_label: '[ESXI]' + chainByNode[0].nameLabel,
SR: sr.$ref,
virtual_size: chainByNode[0].capacity,
})
)
// it can fail before the vdi is connected to the vm
$defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
await xapi.VBD_create({
VDI: vdi.$ref,
VM: vm.$ref,
})
vdis[node] = vdi
}
return vdis
}
async #instantiateVhd({ esxi, disk, lookMissingBlockInParent = true, parentVhd, thin }) {
const { fileName, path, datastore, isFull } = disk
let vhd
if (isFull) {
vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin })
await vhd.readBlockAllocationTable()
} else {
if (parentVhd === undefined) {
throw new Error(`Can't import delta of a running VM without its parent VHD`)
}
vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd, { lookMissingBlockInParent })
}
return vhd
}
async #importDiskChain({ esxi, diskChain, lookMissingBlockInParent = true, parentVhd, thin, vdi }) {
let vhd
for (let diskIndex = 0; diskIndex < diskChain.length; diskIndex++) {
const disk = diskChain[diskIndex]
vhd = await this.#instantiateVhd({ esxi, disk, lookMissingBlockInParent, parentVhd, thin })
}
if (thin || parentVhd !== undefined) {
const stream = vhd.stream()
await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
} else {
// no transformation when there is no snapshot in thick mode
const stream = await vhd.rawContent()
await vdi.$importContent(stream, { format: VDI_FORMAT_RAW })
}
return vhd
}
async #coldImportDiskChainFromEsxi({ esxi, diskChains, isRunning, stopSource, vdis, thin, vmId }) {
if (isRunning) {
if (stopSource) {
// if the VM was running, we stop it and transfer the data in the active disk
await Task.run({ properties: { name: 'powering down source VM' } }, () => esxi.powerOff(vmId))
} else {
throw new Error(`can't cold import disk from VM ${vmId} with stopSource disabled `)
}
}
await Promise.all(
Object.entries(diskChains).map(async ([node, diskChainByNode]) =>
Task.run({ properties: { name: `Cold import of disks ${node}` } }, async () => {
const vdi = vdis[node]
return this.#importDiskChain({ esxi, diskChain: diskChainByNode, thin, vdi })
})
)
)
}
async #warmImportDiskChainFromEsxi({ esxi, diskChains, isRunning, stopSource, thin, vdis, vmId }) {
if (!isRunning) {
return this.#coldImportDiskChainFromEsxi({ esxi, diskChains, isRunning, stopSource, vdis, vmId })
}
const vhds = await Promise.all(
// we need to do the cold import on all disks before stopping the VM and starting to import the last delta
Object.entries(diskChains).map(async ([node, chainByNode]) =>
Task.run({ properties: { name: `Cold import of disks ${node}` } }, async () => {
const vdi = vdis[node]
// it can be empty if the VM doesn't have a snapshot
// nothing can be warm transferred
if (chainByNode.length === 1) {
return
}
// if the VM is running, we'll transfer everything before the last one, which is an active disk
// the esxi api does not allow us to read an active disk
// later we'll stop the VM and transfer this snapshot
return this.#importDiskChain({ esxi, diskChain: chainByNode.slice(0, -1), thin, vdi })
})
)
)
if (stopSource) {
// The vm was running, we stop it and transfer the data in the active disk
await Task.run({ properties: { name: 'powering down source VM' } }, () => esxi.powerOff(vmId))
await Promise.all(
Object.keys(diskChains).map(async (node, index) => {
await Task.run({ properties: { name: `Transfering deltas of ${index}` } }, async () => {
const chainByNode = diskChains[node]
const vdi = vdis[node]
if (vdi === undefined) {
throw new Error(`Can't import delta of a running VM without its parent vdi`)
}
const vhd = vhds[index]
return this.#importDiskChain({ esxi, diskChain: chainByNode.slice(-1), parentVhd: vhd, thin, vdi })
})
})
)
} else {
Task.warning(`Import from VM ${vmId} with stopSource disabled won't contain the data of the last snapshot`)
}
}
@decorateWith(deferrable)
async migrationfromEsxi(
$defer,
@@ -231,96 +354,18 @@ export default class MigrateVm {
)
return vm
})
$defer.onFailure.call(xapi, 'VM_destroy', vm.$ref)
const vhds = await Promise.all(
Object.keys(chainsByNodes).map(async (node, userdevice) =>
Task.run({ properties: { name: `Cold import of disks ${node}` } }, async () => {
const chainByNode = chainsByNodes[node]
const vdi = await xapi._getOrWaitObject(
await xapi.VDI_create({
name_description: 'fromESXI' + chainByNode[0].descriptionLabel,
name_label: '[ESXI]' + chainByNode[0].nameLabel,
SR: sr.$ref,
virtual_size: chainByNode[0].capacity,
})
)
// it can fail before the vdi is connected to the vm
$defer.onFailure.call(xapi, 'VDI_destroy', vdi.$ref)
await xapi.VBD_create({
VDI: vdi.$ref,
VM: vm.$ref,
})
let parentVhd, vhd
// if the VM is running, we'll transfer everything before the last one, which is an active disk
// the esxi api does not allow us to read an active disk
// later we'll stop the VM and transfer this snapshot
const nbColdDisks = isRunning ? chainByNode.length - 1 : chainByNode.length
for (let diskIndex = 0; diskIndex < nbColdDisks; diskIndex++) {
// the first one is a RAW disk (full)
const disk = chainByNode[diskIndex]
const { fileName, path, datastore, isFull } = disk
if (isFull) {
vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin })
await vhd.readBlockAllocationTable()
} else {
vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd)
}
parentVhd = vhd
}
// it can be empty if the VM doesn't have a snapshot and is running
if (vhd !== undefined) {
if (thin) {
const stream = vhd.stream()
await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
} else {
// no transformation when there is no snapshot in thick mode
const stream = await vhd.rawContent()
await vdi.$importContent(stream, { format: VDI_FORMAT_RAW })
}
}
return { vdi, vhd }
})
)
)
if (isRunning && stopSource) {
// if the VM was running, we stop it and transfer the data in the active disk
await Task.run({ properties: { name: 'powering down source VM' } }, () => esxi.powerOff(vmId))
await Promise.all(
Object.keys(chainsByNodes).map(async (node, userdevice) => {
await Task.run({ properties: { name: `Transfering deltas of ${userdevice}` } }, async () => {
const chainByNode = chainsByNodes[node]
const disk = chainByNode[chainByNode.length - 1]
const { fileName, path, datastore, isFull } = disk
const { vdi, vhd: parentVhd } = vhds[userdevice]
let vhd
if (vdi === undefined) {
throw new Error(`Can't import delta of a running VM without its parent vdi`)
}
if (isFull) {
vhd = await VhdEsxiRaw.open(esxi, datastore, path + '/' + fileName, { thin })
await vhd.readBlockAllocationTable()
} else {
if (parentVhd === undefined) {
throw new Error(`Can't import delta of a running VM without its parent VHD`)
}
// we only want to transfer blocks present in the delta vhd, not the full vhd chain
vhd = await openDeltaVmdkasVhd(esxi, datastore, path + '/' + fileName, parentVhd, {
lookMissingBlockInParent: false,
})
}
const stream = vhd.stream()
await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
})
})
)
}
const vdis = await this._createVdis({ diskChains: chainsByNodes, sr, xapi, vm })
$defer.onFailure.call(async () => Object.values(vdis).map(vdi => vdi && xapi.VDI_destroy(vdi.$ref)))
await this.#coldImportDiskChainFromEsxi({
esxi,
diskChains: chainsByNodes,
isRunning,
stopSource,
thin,
vdis,
vmId,
})
await Task.run({ properties: { name: 'Finishing transfer' } }, async () => {
// remove the importing prefix from the VM's name label

View File

@@ -230,11 +230,6 @@ export default class RestApi {
collections.pools.actions = {
__proto__: null,
emergency_shutdown: async ({ xapiObject }) => {
await app.checkFeatureAuthorization('POOL_EMERGENCY_SHUTDOWN')
await xapiObject.$xapi.pool_emergencyShutdown()
},
rolling_update: async ({ xoObject }) => {
await app.checkFeatureAuthorization('ROLLING_POOL_UPDATE')

View File

@@ -283,14 +283,7 @@ export default class Jobs extends Component {
})
}
_handleSelectMethod = action => {
this.setState({ action })
// reset parameters
//
// see https://xcp-ng.org/forum/post/69299
this.refs.params.value = undefined
}
_handleSelectMethod = action => this.setState({ action })
_handleSubmit = () => {
const { name, method, params } = this.refs

View File

@@ -61,7 +61,7 @@ const TASK_ITEM_STYLE = {
}
const FILTERS = {
filterOutShortTasks: '!name_label: |(SR.scan host.call_plugin "/rrd_updates")',
filterOutShortTasks: '!name_label: |(SR.scan host.call_plugin)',
}
@connectStore(() => ({