Compare commits


2 Commits

Author | SHA1 | Message | Date
Florent BEAUCHAMP | 6fdf0a97e4 | Simplify @xen-orchestra/backups/_runners/_writers/IncrementalRemoteWriter.mjs (Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>) | 2024-02-14 07:46:39 +01:00
Florent Beauchamp | 3a4bdd3b1e | fix(backups): ensure vhd are chained with older vhd | 2024-02-13 12:31:22 +00:00
37 changed files with 141 additions and 472 deletions

View File

@@ -65,11 +65,10 @@ module.exports = {
typescript: true,
'eslint-import-resolver-custom-alias': {
alias: {
'@core': '../web-core/lib',
'@': './src',
},
extensions: ['.ts'],
packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
packages: ['@xen-orchestra/lite'],
},
},
},

View File

@@ -160,10 +160,10 @@ export class ImportVmBackup {
// update the stream with the negative vhd stream
stream = await negativeVhd.stream()
vdis[vdiRef].baseVdi = snapshotCandidate
} catch (error) {
} catch (err) {
// can be a broken VHD chain, a vhd chain with a key backup, ...
// not an irrecoverable error: don't dispose parentVhd, and fall back to a full restore
warn(`can't use differential restore`, { error })
warn(`can't use differential restore`, err)
disposableDescendants?.dispose()
}
}
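The pattern above treats a broken VHD chain as a recoverable condition rather than a fatal one. A minimal sketch of the same fallback logic, with `tryDifferentialRestore` and `fullRestore` as hypothetical stand-ins for the real methods:

```js
// Minimal sketch of the fallback pattern (hypothetical helpers, not the real API).
async function restore(backup) {
  try {
    // attempt the cheaper differential restore first
    return await tryDifferentialRestore(backup)
  } catch (err) {
    // a broken VHD chain is recoverable: log it and fall back to a full restore
    console.warn(`can't use differential restore`, err)
    return fullRestore(backup)
  }
}
```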

View File

@@ -191,14 +191,13 @@ export class RemoteAdapter {
// check if we will be allowed to merge a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(VhdSynthetic.fromVhdChain(this.handler, path), vhd => {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
// check if the whole chain is composed of vhd directories
const isVhdDirectory = vhd.checkVhdsClass(VhdDirectory)
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()
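Stripped of the diff noise, the method answers one question: can a VHD produced by this adapter be merged into the VHD at `path`? A condensed sketch of the decision (not the actual method body):

```js
// The parent is mergeable when its uuid matches AND its storage format is
// compatible with what this adapter would produce (condensed sketch).
const mergeable =
  vhd.footer.uuid.equals(packedParentUid) &&
  (vhd instanceof VhdDirectory
    ? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
    : !this.useVhdDirectory())
```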

View File

@@ -2,7 +2,6 @@ import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import * as UUID from 'uuid'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'
@@ -10,48 +9,11 @@ import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
import { Disposable } from 'promise-toolbox'
import { openVhd } from 'vhd-lib'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {
return IncrementalRemoteWriter
}
async _selectBaseVm(metadata) {
// for each disk, get the parent
const baseUuidToSrcVdi = new Map()
// no previous backup for a base (= key) backup
if (metadata.isBase) {
return
}
await asyncEach(Object.entries(metadata.vdis), async ([id, vdi]) => {
const isDifferencing = metadata.isVhdDifferencing[`${id}.vhd`]
if (isDifferencing) {
const vmDir = getVmBackupDir(metadata.vm.uuid)
const path = `${vmDir}/${metadata.vhds[id]}`
// don't catch errors: we can't recover if the source vhds are missing
await Disposable.use(openVhd(this._sourceRemoteAdapter._handler, path), vhd => {
baseUuidToSrcVdi.set(UUID.stringify(vhd.header.parentUuid), vdi.$snapshot_of$uuid)
})
}
})
const presentBaseVdis = new Map(baseUuidToSrcVdi)
await this._callWriters(
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis),
'writer.checkBaseVdis()',
false
)
// check if the parent vdis are present in all the remotes
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
throw new Error(`Missing vdi ${baseUuid} which is a base for a delta`)
}
})
// yeah, let's go
}
async _run($defer) {
const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
await this._callWriters(async writer => {
@@ -64,7 +26,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
if (transferList.length > 0) {
for (const metadata of transferList) {
assert.strictEqual(metadata.mode, 'delta')
await this._selectBaseVm(metadata)
await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
useChain: false,
@@ -88,17 +50,6 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
}),
'writer.transfer()'
)
// this will update the parent name with the needed alias
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp: metadata.timestamp,
vdis: incrementalExport.vdis,
}),
'writer.updateUuidAndChain()'
)
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
// for healthcheck
this._tags = metadata.vm.tags

View File

@@ -78,18 +78,6 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
'writer.transfer()'
)
// we want to control the uuids of the vhds in the chain
// and ensure they are correctly chained
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp,
vdis: deltaExport.vdis,
}),
'writer.updateUuidAndChain()'
)
this._baseVm = exportedVm
if (baseVm !== undefined) {
@@ -145,7 +133,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
])
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi.uuid)
baseUuidToSrcVdi.set(baseUuid, srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
@@ -166,18 +154,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdiUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdiUuid)
fullVdisRequired.add(srcVdi.uuid)
}
})

View File

@@ -2,7 +2,6 @@ import assert from 'node:assert'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncMap } from '@xen-orchestra/async-map'
import { createLogger } from '@xen-orchestra/log'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { formatDateTime } from '@xen-orchestra/xapi'
@@ -11,8 +10,6 @@ import { getOldEntries } from '../../_getOldEntries.mjs'
import { Task } from '../../Task.mjs'
import { Abstract } from './_Abstract.mjs'
const { info, warn } = createLogger('xo:backups:AbstractXapi')
export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
constructor({
config,
@@ -174,20 +171,6 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
}
}
// this will delete current snapshot in case of failure
// to ensure any retry will start with a clean state, especially in the case of rolling snapshots
#removeCurrentSnapshotOnFailure() {
if (this._mustDoSnapshot() && this._exportedVm !== undefined) {
info('will delete snapshot on failure', { vm: this._vm, snapshot: this._exportedVm })
assert.notStrictEqual(
this._vm.$ref,
this._exportedVm.$ref,
'there should be a snapshot, but vm and snapshot have the same ref'
)
return this._xapi.VM_destroy(this._exportedVm.$ref)
}
}
async _fetchJobSnapshots() {
const jobId = this._jobId
const vmRef = this._vm.$ref
@@ -288,17 +271,11 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
await this._exportedVm.update_blocked_operations({ pool_migrate, migrate_send })
}
}
} catch (error) {
try {
await this.#removeCurrentSnapshotOnFailure()
} catch (removeSnapshotError) {
warn('failed to remove current snapshot', { error: removeSnapshotError })
}
throw error
} finally {
if (startAfter) {
ignoreErrors.call(vm.$callAsync('start', false, false))
}
await this._fetchJobSnapshots()
await this._removeUnusedSnapshots()
}

View File

@@ -1,15 +1,17 @@
import assert from 'node:assert'
import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, openVhd } from 'vhd-lib'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname, basename } from 'node:path'
import { dirname, basename as pathBasename } from 'node:path'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
@@ -21,45 +23,42 @@ import { Disposable } from 'promise-toolbox'
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
#parentVdiPaths
#vhds
async checkBaseVdis(baseUuidToSrcVdi) {
this.#parentVdiPaths = {}
const { handler } = this._adapter
const adapter = this._adapter
const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdiUuid]) => {
let parentDestPath
const vhdDir = `${vdisDir}/${srcVdiUuid}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
try {
const vhds = await handler.list(vhdDir, {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
// the last one is probably the right one
for (let i = vhds.length - 1; i >= 0 && parentDestPath === undefined; i--) {
const path = vhds[i]
await asyncMap(vhds, async path => {
try {
if (await adapter.isMergeableParent(packedBaseUuid, path)) {
parentDestPath = path
}
await checkVhdChain(handler, path)
// Warning: this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the checks of a path run in parallel, found would contain
// only the last answer of isMergeableParent, which is probably not the right one.
// This led to the support tickets https://help.vates.fr/#ticket/zoom/4751, 4729, 4665 and 4300
const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
}
}
})
} catch (error) {
warn('checkBaseVdis', { error })
}
// no usable parent => the runner will have to decide whether to fall back to a full backup or stop
if (parentDestPath === undefined) {
if (!found) {
baseUuidToSrcVdi.delete(baseUuid)
} else {
this.#parentVdiPaths[vhdDir] = parentDestPath
}
})
}
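The warning in that comment is worth demonstrating in isolation. A standalone sketch (illustrative only; `check` is a made-up stand-in for `isMergeableParent`) of why `found = found || await check(path)` misbehaves when iterations run in parallel:

```js
// BAD: `found` is read before the await, so the last assignment wins and an
// earlier positive result can be overwritten.
const check = async path => path === 'a' // pretend only 'a' is mergeable
let found = false
await Promise.all(
  ['a', 'b'].map(async path => {
    found = found || (await check(path)) // read, await, then write
  })
)
console.log(found) // false: the 'b' iteration overwrote the 'a' result

// GOOD: await first, then read-modify-write synchronously, as in the diff.
found = false
await Promise.all(
  ['a', 'b'].map(async path => {
    const isMergeable = await check(path)
    found = found || isMergeable
  })
)
console.log(found) // true
```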
@@ -124,44 +123,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}
async updateUuidAndChain({ isVhdDifferencing, vdis }) {
assert.notStrictEqual(
this.#vhds,
undefined,
'_transfer must be called before updateUuidAndChain for incremental backups'
)
const parentVdiPaths = this.#parentVdiPaths
const { handler } = this._adapter
const vhds = this.#vhds
await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
const path = `${this._vmBackupDir}/${vhds[id]}`
if (isDifferencing) {
assert.notStrictEqual(
parentVdiPaths,
undefined,
'checkBaseVdis must be called before updateUuidAndChain for incremental backups'
)
const parentPath = parentVdiPaths[dirname(path)]
// we are in an incremental backup
// we already computed the chain in checkBaseVdis
assert.notStrictEqual(parentPath, undefined, 'A differential VHD must have a parent')
// forbid any kind of loop
assert.ok(basename(parentPath) < basename(path), `vhd must be sorted to be chained`)
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD if needed
await Disposable.use(openVhd(handler, path), async vhd => {
if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
}
})
})
}
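The `basename(parentPath) < basename(path)` assertion holds because backup VHDs are named after `formatFilenameDate` timestamps, so lexicographic order matches chronological order and a child can never be chained onto a newer VHD. For example (hypothetical file names):

```js
// Illustrative: filename-date ordering keeps the chain acyclic.
'20240213T123122Z.vhd' < '20240214T074639Z.vhd' // true: the parent predates the child
```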
async _deleteOldEntries() {
const adapter = this._adapter
const oldEntries = this._oldEntries
@@ -180,10 +141,14 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const jobId = job.id
const handler = adapter.handler
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// @todo: should skip the backup while taking care not to stall the forked stream
Task.info('This backup has already been transferred')
}
const basename = formatFilenameDate(timestamp)
// update this.#vhds before possibly skipping the transfer, so that
// updateUuidAndChain has all the data it needs
const vhds = (this.#vhds = mapValues(
const vhds = mapValues(
deltaExport.vdis,
vdi =>
`vdis/${jobId}/${
@@ -193,15 +158,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vdi.uuid
: vdi.$snapshot_of$uuid
}/${adapter.getVhdFileName(basename)}`
))
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip the backup while taking care not to stall the forked stream
Task.info('This backup has already been transferred')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}
)
metadataContent = {
isVhdDifferencing,
@@ -217,13 +174,41 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vm,
vmSnapshot,
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await asyncEach(
Object.keys(deltaExport.vdis),
async id => {
Object.entries(deltaExport.vdis),
async ([id, vdi]) => {
const path = `${this._vmBackupDir}/${vhds[id]}`
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
prependDir: true,
})
)
.sort()
.pop()
assert.notStrictEqual(
parentPath,
undefined,
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
)
assert.ok(
pathBasename(parentPath) < pathBasename(path),
`vhd must be sorted to be chained`
)
parentPath = parentPath.slice(1) // remove leading slash
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
// don't write it as transferSize += await asyncFunction()
// since i += await asyncFun leads to a race condition,
// as explained: https://eslint.org/docs/latest/rules/require-atomic-updates
@@ -235,6 +220,17 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
writeBlockConcurrency: this._config.writeBlockConcurrency,
})
transferSize += transferSizeOneDisk
if (isDifferencing) {
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
},
{
concurrency: settings.diskPerVmConcurrency,
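The `transferSize` comment above refers to the same class of bug, flagged by the linked eslint rule. A minimal illustration of why `total += await f()` is racy under concurrency, and the safe form used in the diff:

```js
// BAD: `total` is read before the await, so concurrent iterations all add to
// the same stale value.
const tasks = [1, 2, 3].map(n => async () => n)
let total = 0
await Promise.all(
  tasks.map(async f => {
    total += await f() // read, await, then write
  })
)
console.log(total) // 3 instead of 6: every iteration computed `0 + n`

// GOOD: await first, then update synchronously.
total = 0
await Promise.all(
  tasks.map(async f => {
    const size = await f()
    total += size
  })
)
console.log(total) // 6
```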

View File

@@ -1,4 +1,3 @@
import assert from 'node:assert'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'
@@ -15,7 +14,6 @@ import find from 'lodash/find.js'
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
assert.notStrictEqual(baseVm, undefined)
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
@@ -38,9 +36,7 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
}
}
}
updateUuidAndChain() {
// nothing to do, the chaining is not modified in this case
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({

View File

@@ -5,10 +5,6 @@ export class AbstractIncrementalWriter extends AbstractWriter {
throw new Error('Not implemented')
}
updateUuidAndChain() {
throw new Error('Not implemented')
}
cleanup() {
throw new Error('Not implemented')
}

View File

@@ -113,13 +113,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
)
}
async _isAlreadyTransferred(timestamp) {
_isAlreadyTransferred(timestamp) {
const vmUuid = this._vmUuid
const adapter = this._adapter
const backupDir = getVmBackupDir(vmUuid)
try {
const actualMetadata = JSON.parse(
await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
)
return actualMetadata
} catch (error) {}

View File

@@ -230,7 +230,6 @@ Settings are described in [`@xen-orchestra/backups/_runners/VmsXapi.mjs`](http
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `updateUuidAndChain({ isVhdDifferencing, vdis })`
- `cleanup()`
- `healthCheck()` // not executed if there is no health check SR or the tag doesn't match
- **Full**

View File

@@ -20,7 +20,5 @@ export function split(path) {
return parts
}
// paths are made absolute, otherwise fs.relative() would resolve them against the working directory
export const relativeFromFile = (file, path) => relative(dirname(normalize(file)), normalize(path))
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
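The normalization matters because `relative()` resolves relative inputs against the working directory. An illustration, where `normalize` stands in for a helper that anchors paths at `/` (an assumption; the module's actual helper is not shown in this hunk):

```js
// Illustrative: why inputs must be anchored before calling path.relative().
import { dirname, relative, resolve } from 'node:path'

const normalize = p => resolve('/', p) // assumed helper

// raw relative inputs are resolved against process.cwd(),
// so the result depends on where the process runs:
relative(dirname('foo/bar/file.vhd'), '/foo/baz/path.vhd')

// anchored at '/', the result is stable: '../baz/path.vhd'
relative(dirname(normalize('foo/bar/file.vhd')), normalize('foo/baz/path.vhd'))
```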

View File

@@ -1,17 +0,0 @@
import { describe, it } from 'test'
import { strict as assert } from 'assert'
import { relativeFromFile } from './path.js'
describe('relativeFromFile()', function () {
for (const [title, args] of Object.entries({
'file absolute and path absolute': ['/foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file relative and path absolute': ['foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file absolute and path relative': ['/foo/bar/file.vhd', 'foo/baz/path.vhd'],
'file relative and path relative': ['foo/bar/file.vhd', 'foo/baz/path.vhd'],
})) {
it('works with ' + title, function () {
assert.equal(relativeFromFile(...args), '../baz/path.vhd')
})
}
})

View File

@@ -54,10 +54,10 @@ async function handleExistingFile(root, indexPath, path) {
await indexFile(fullPath, indexPath)
}
}
} catch (error) {
if (error.code !== 'EEXIST') {
} catch (err) {
if (err.code !== 'EEXIST') {
// there can be a symbolic link in the tree
warn('handleExistingFile', { error })
warn('handleExistingFile', err)
}
}
}
@@ -106,7 +106,7 @@ export async function watchRemote(remoteId, { root, immutabilityDuration, rebuil
await File.liftImmutability(settingPath)
} catch (error) {
// file may not exist, and it's not really a problem
info('lifting immutability on current settings', { error })
info('lifting immutability on current settings', error)
}
await fs.writeFile(
settingPath,

View File

@@ -50,17 +50,7 @@ const RRD_POINTS_PER_STEP: { [key in RRD_STEP]: number } = {
// Utils
// -------------------------------------------------------------------
function parseNumber(value: number | string) {
// Starting from XAPI 23.31, numbers in the JSON payload are encoded as
// strings to support NaN, Infinity and -Infinity
if (typeof value === 'string') {
const asNumber = +value
if (isNaN(asNumber) && value !== 'NaN') {
throw new Error('cannot parse number: ' + value)
}
value = asNumber
}
function convertNanToNull(value: number) {
return isNaN(value) ? null : value
}
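Given that encoding change, a numeric field can arrive as `42`, `'42'`, `'NaN'` or `'Infinity'`. A reconstruction of the removed `parseNumber` showing its behaviour on each case (the trailing `return` is assumed, since the hunk cuts the function off):

```js
// Reconstruction of the removed helper (illustrative).
function parseNumber(value) {
  // Starting from XAPI 23.31, numbers in the JSON payload are encoded as
  // strings to support NaN, Infinity and -Infinity
  if (typeof value === 'string') {
    const asNumber = +value
    if (isNaN(asNumber) && value !== 'NaN') {
      throw new Error('cannot parse number: ' + value)
    }
    value = asNumber
  }
  return value // assumed, not visible in the hunk
}

parseNumber(42) // 42
parseNumber('42') // 42
parseNumber('NaN') // NaN: a legitimate value, not an error
parseNumber('-Infinity') // -Infinity
parseNumber('abc') // throws: cannot parse number: abc
```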
@@ -69,7 +59,7 @@ function parseNumber(value: number | string) {
// -------------------------------------------------------------------
const computeValues = (dataRow: any, legendIndex: number, transformValue = identity) =>
map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))
map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
const createGetProperty = (obj: object, property: string, defaultValue: unknown) =>
defaults(obj, { [property]: defaultValue })[property] as any
@@ -329,14 +319,8 @@ export default class XapiStats {
},
abortSignal,
})
const text = await resp.text()
try {
// starting from XAPI 23.31, the response is valid JSON
return JSON.parse(text)
} catch (error) {
// eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
return JSON5.parse(text)
}
// eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
return JSON5.parse(await resp.text())
}
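The removed try/catch exists because pre-23.31 hosts emit bare `NaN`/`Infinity` tokens, which JSON5 accepts but strict JSON does not. For instance:

```js
// Illustrative: why JSON.parse alone is not enough for older XAPI responses.
import JSON5 from 'json5'

JSON5.parse('{ "values": [NaN, Infinity] }') // { values: [ NaN, Infinity ] }
JSON.parse('{ "values": [NaN, Infinity] }') // throws SyntaxError
```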
// To avoid multiple requests, we keep a cache for the stats and
@@ -399,10 +383,7 @@ export default class XapiStats {
abortSignal,
})
const actualStep = parseNumber(json.meta.step)
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
const actualStep = json.meta.step as number
if (json.data.length > 0) {
// fetched data is organized from the newest to the oldest
@@ -426,15 +407,14 @@ export default class XapiStats {
let stepStats = xoObjectStats[actualStep]
let cacheStepStats = cacheXoObjectStats[actualStep]
const endTimestamp = parseNumber(json.meta.end)
if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
stepStats = xoObjectStats[actualStep] = {
endTimestamp,
endTimestamp: json.meta.end,
interval: actualStep,
canBeExpired: false,
}
cacheStepStats = cacheXoObjectStats[actualStep] = {
endTimestamp,
endTimestamp: json.meta.end,
interval: actualStep,
canBeExpired: true,
}
@@ -458,6 +438,10 @@ export default class XapiStats {
})
})
}
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
return

View File

@@ -1,15 +1,13 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "src/**/*", "src/**/*.vue", "../web-core/lib/**/*", "../web-core/lib/**/*.vue"],
"include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
"@/*": ["./src/*"]
}
}
}

View File

@@ -23,7 +23,6 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},

View File

@@ -27,16 +27,6 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
### Consumer
Then, at the application level, configure how the logs are handled:

View File

@@ -45,16 +45,6 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
### Consumer
Then, at the application level, configure how the logs are handled:

View File

@@ -10,8 +10,7 @@
}
},
"devDependencies": {
"vue": "^3.4.13",
"@vue/tsconfig": "^0.5.1"
"vue": "^3.4.13"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/web-core",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
@@ -26,6 +25,6 @@
},
"license": "AGPL-3.0-or-later",
"engines": {
"node": ">=18"
"node": ">=8.10"
}
}

View File

@@ -1,12 +0,0 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "lib/**/*", "lib/**/*.vue"],
"exclude": ["lib/**/__tests__/*"],
"compilerOptions": {
"noEmit": true,
"baseUrl": ".",
"paths": {
"@core/*": ["./lib/*"]
}
}
}

View File

@@ -1,22 +1,13 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": [
"env.d.ts",
"typed-router.d.ts",
"src/**/*",
"src/**/*.vue",
"../web-core/lib/**/*",
"../web-core/lib/**/*.vue"
],
"include": ["env.d.ts", "typed-router.d.ts", "src/**/*", "src/**/*.vue"],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
"@/*": ["./src/*"]
}
}
}

View File

@@ -11,7 +11,6 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},
})

View File

@@ -21,23 +21,12 @@ export default class Vif {
MAC = '',
} = {}
) {
if (device === undefined) {
const allowedDevices = await this.call('VM.get_allowed_VIF_devices', VM)
if (allowedDevices.length === 0) {
const error = new Error('could not find an allowed VIF device')
error.poolUuid = this.pool.uuid
error.vmRef = VM
throw error
}
device = allowedDevices[0]
}
const [powerState, ...rest] = await Promise.all([
this.getField('VM', VM, 'power_state'),
MTU ?? this.getField('network', network, 'MTU'),
device ?? (await this.call('VM.get_allowed_VIF_devices', VM))[0],
MTU ?? (await this.getField('network', network, 'MTU')),
])
;[MTU] = rest
;[device, MTU] = rest
const vifRef = await this.call('VIF.create', {
currently_attached: powerState === 'Suspended' ? currently_attached : undefined,

View File

@@ -8,11 +8,6 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- Disable search engine indexing via a `robots.txt`
- [Stats] Support format used by XAPI 23.31
- [REST API] Export host [SMT](https://en.wikipedia.org/wiki/Simultaneous_multithreading) status at `/hosts/:id/smt` [Forum#71374](https://xcp-ng.org/forum/post/71374)
- [Home & REST API] `$container` field of an halted VM now points to a host if a VDI is on a local storage [Forum#71769](https://xcp-ng.org/forum/post/71769)
- [Size Input] Ability to select two new units in the dropdown (`TiB`, `PiB`) (PR [#7382](https://github.com/vatesfr/xen-orchestra/pull/7382))
### Bug fixes
@@ -21,8 +16,6 @@
- [Settings/XO Config] Sort backups from newest to oldest
- [Plugins/audit] Don't log `tag.getAllConfigured` calls
- [Remotes] Correctly clear error when the remote is tested with success
- [Import/VMWare] Fix importing last snapshot (PR [#7370](https://github.com/vatesfr/xen-orchestra/pull/7370))
- [Host/Reboot] Fix false positive warning when restarting a host after updates (PR [#7366](https://github.com/vatesfr/xen-orchestra/pull/7366))
### Packages to release
@@ -41,11 +34,8 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- @xen-orchestra/fs patch
- @xen-orchestra/xapi patch
- vhd-lib patch
- xo-server minor
- xo-server patch
- xo-server-audit patch
- xo-web minor
- xo-web patch
<!--packages-end-->

View File

@@ -93,21 +93,6 @@ Follow the instructions:
You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
If you want to use a static IP address for your appliance:
```sh
xe vm-param-set uuid="$uuid" \
xenstore-data:vm-data/ip="$ip" \
xenstore-data:vm-data/netmask="$netmask" \
xenstore-data:vm-data/gateway="$gateway"
```
If you want to replace the default DNS server:
```sh
xe vm-param-set uuid="$uuid" xenstore-data:vm-data/dns="$dns"
```
After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
## First console connection

View File

@@ -46,9 +46,9 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}
get compressionType() {
const compressionType = this.#vhds[0].compressionType
for (let i = 0; i < this.#vhds.length; i++) {
if (compressionType !== this.#vhds[i].compressionType) {
const compressionType = this.vhds[0].compressionType
for (let i = 0; i < this.vhds.length; i++) {
if (compressionType !== this.vhds[i].compressionType) {
return 'MIXED'
}
}
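Spelled out flat: a synthetic VHD reports its chain's compression type only when every member agrees, otherwise `'MIXED'`. A standalone equivalent of the getter (illustrative):

```js
// Illustrative standalone equivalent of the getter above.
const compressionTypeOf = vhds =>
  vhds.every(vhd => vhd.compressionType === vhds[0].compressionType)
    ? vhds[0].compressionType
    : 'MIXED'

compressionTypeOf([{ compressionType: 'gzip' }, { compressionType: 'gzip' }]) // 'gzip'
compressionTypeOf([{ compressionType: 'gzip' }, { compressionType: undefined }]) // 'MIXED'
```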

View File

@@ -1,6 +1,6 @@
'use strict'
const { relativeFromFile } = require('@xen-orchestra/fs/path')
const { dirname, relative } = require('path')
const { openVhd } = require('./openVhd')
const { DISK_TYPES } = require('./_constants')
@@ -21,7 +21,7 @@ module.exports = async function chain(parentHandler, parentPath, childHandler, c
}
await childVhd.readBlockAllocationTable()
const parentName = relativeFromFile(childPath, parentPath)
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
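The parent locator written into the child's header is a path relative to the child's own directory, which is what `relativeFromFile` computes. For example (hypothetical paths):

```js
// Illustrative: the stored parent name is relative to the child VHD's directory.
import { dirname, relative } from 'node:path'

const childPath = '/xo-vm-backups/vm1/vdis/job1/disk1/20240214T074639Z.vhd'
const parentPath = '/xo-vm-backups/vm1/vdis/job1/disk1/20240213T123122Z.vhd'
relative(dirname(childPath), parentPath) // '20240213T123122Z.vhd'
```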

View File

@@ -27,7 +27,7 @@ async function sendToNagios(app, jobName, vmBackupInfo) {
jobName
)
} catch (error) {
warn('sendToNagios:', { error })
warn('sendToNagios:', error)
}
}

View File

@@ -1,4 +1,3 @@
import semver from 'semver'
import { createLogger } from '@xen-orchestra/log'
import assert from 'assert'
import { format } from 'json-rpc-peer'
@@ -137,38 +136,13 @@ export async function restart({
const pool = this.getObject(host.$poolId, 'pool')
const master = this.getObject(pool.master, 'host')
const hostRebootRequired = host.rebootRequired
// we are currently in a host upgrade process
if (hostRebootRequired && host.id !== master.id) {
// this error is not ideal but it means that the pool master must be fully upgraded/rebooted before the current host can be rebooted.
//
// there is a single error for the 3 cases because the client must handle them the same way
const throwError = () =>
incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})
if (semver.lt(master.version, host.version)) {
log.error(`master version (${master.version}) is older than the host version (${host.version})`, {
masterId: master.id,
hostId: host.id,
})
throwError()
}
if (semver.eq(master.version, host.version)) {
if ((await this.getXapi(host).listMissingPatches(master._xapiId)).length > 0) {
log.error('master has missing patches', { masterId: master.id })
throwError()
}
if (master.rebootRequired) {
log.error('master needs to reboot')
throwError()
}
}
if (hostRebootRequired && host.id !== master.id && host.version === master.version) {
throw incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})
}
}

View File

@@ -328,34 +328,6 @@ const TRANSFORMS = {
const { creation } = xoData.extract(obj) ?? {}
let $container
if (obj.resident_on !== 'OpaqueRef:NULL') {
// resident_on is set when the VM is running (or paused or suspended on a host)
$container = link(obj, 'resident_on')
} else {
// if the VM is halted, the $container is the pool
$container = link(obj, 'pool')
// unless one of its VDIs is on a non-shared SR
//
// linked objects may not be there when this code runs, and it will only be
// refreshed when the VM XAPI record changes; this value is not guaranteed
// to be up-to-date, but in practice it appears to work fine thanks to
// `VBDs` and `current_operations` changing when a VDI is
// added/removed/migrated
for (const vbd of obj.$VBDs) {
const sr = vbd?.$VDI?.$SR
if (sr !== undefined && !sr.shared) {
const pbd = sr.$PBDs[0]
const hostId = pbd && link(pbd, 'host')
if (hostId !== undefined) {
$container = hostId
break
}
}
}
}
const vm = {
// type is redefined after for controllers/, templates &
// snapshots.
@@ -450,7 +422,8 @@ const TRANSFORMS = {
xenTools,
...getVmGuestToolsProps(obj),
$container,
// TODO: handle local VMs (`VM.get_possible_hosts()`).
$container: isRunning ? link(obj, 'resident_on') : link(obj, 'pool'),
$VBDs: link(obj, 'VBDs'),
// TODO: dedupe

View File

@@ -45,17 +45,7 @@ const RRD_POINTS_PER_STEP = {
// Utils
// -------------------------------------------------------------------
function parseNumber(value) {
// Starting from XAPI 23.31, numbers in the JSON payload are encoded as
// strings to support NaN, Infinity and -Infinity
if (typeof value === 'string') {
const asNumber = +value
if (isNaN(asNumber) && value !== 'NaN') {
throw new Error('cannot parse number: ' + value)
}
value = asNumber
}
function convertNanToNull(value) {
return isNaN(value) ? null : value
}
@@ -68,7 +58,7 @@ async function getServerTimestamp(xapi, hostRef) {
// -------------------------------------------------------------------
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))
map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
const combineStats = (stats, path, combineValues) => zipWith(...map(stats, path), (...values) => combineValues(values))
@@ -255,15 +245,7 @@ export default class XapiStats {
start: timestamp,
},
})
.then(response => response.text())
.then(data => {
try {
// starting from XAPI 23.31, the response is valid JSON
return JSON.parse(data)
} catch (_) {
return JSON5.parse(data)
}
})
.then(response => response.text().then(JSON5.parse))
.catch(err => {
delete this.#hostCache[hostUuid][step]
throw err
@@ -317,7 +299,7 @@ export default class XapiStats {
// To avoid crossing over the boundary, we ask for one less step
const optimumTimestamp = currentTimeStamp - maxDuration + step
const json = await this._getJson(xapi, host, optimumTimestamp, step)
const actualStep = parseNumber(json.meta.step)
const actualStep = json.meta.step
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
@@ -344,10 +326,9 @@ export default class XapiStats {
return
}
const endTimestamp = parseNumber(json.meta.end)
if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
stepStats = {
endTimestamp,
endTimestamp: json.meta.end,
interval: actualStep,
stats: {},
}

View File

@@ -280,7 +280,7 @@ export default class MigrateVm {
const stream = vhd.stream()
await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
}
return { vdi, vhd }
return vhd
})
)
)

View File

@@ -1,6 +1,5 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy.js'
import { basename } from 'path'
import { createLogger } from '@xen-orchestra/log'
import { format, parse } from 'xo-remote-parser'
import {
DEFAULT_ENCRYPTION_ALGORITHM,
@@ -18,35 +17,17 @@ import { Remotes } from '../models/remote.mjs'
// ===================================================================
const { warn } = createLogger('xo:mixins:remotes')
const obfuscateRemote = ({ url, ...remote }) => {
const parsedUrl = parse(url)
remote.url = format(sensitiveValues.obfuscate(parsedUrl))
return remote
}
// these properties should be defined on the remote object itself and not as
// part of the remote URL
//
// there is a bug somewhere that keeps putting them into the URL; this list
// is here to help track it
const INVALID_URL_PARAMS = ['benchmarks', 'id', 'info', 'name', 'proxy', 'enabled', 'error', 'url']
function validateUrl(url) {
const parsedUrl = parse(url)
const { path } = parsedUrl
function validatePath(url) {
const { path } = parse(url)
if (path !== undefined && basename(path) === 'xo-vm-backups') {
throw invalidParameters('remote url should not end with xo-vm-backups')
}
for (const param of INVALID_URL_PARAMS) {
if (Object.hasOwn(parsedUrl, param)) {
// log with stack trace
warn(new Error('invalid remote URL param ' + param))
}
}
}
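A sketch of what the removed `validateUrl` guarded against (the remote URL below is hypothetical, and it assumes `parse()` exposes a `path` property for such remotes):

```js
// Illustrative only.
validateUrl('nfs://192.168.1.1:/exports/xo-vm-backups')
// → throws invalidParameters('remote url should not end with xo-vm-backups')

// A URL carrying one of the INVALID_URL_PARAMS (e.g. `id`) is not rejected:
// it is logged with a stack trace to help locate the code path that keeps
// re-injecting these properties into the URL.
```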
export default class {
@@ -201,22 +182,6 @@ export default class {
if (remote === undefined) {
throw noSuchObject(id, 'remote')
}
const parsedUrl = parse(remote.url)
let fixed = false
for (const param of INVALID_URL_PARAMS) {
if (Object.hasOwn(parsedUrl, param)) {
// delete the value to trace its real origin when it's added back
// with `updateRemote()`
delete parsedUrl[param]
fixed = true
}
}
if (fixed) {
remote.url = format(parsedUrl)
this._remotes.update(remote).catch(warn)
}
return remote
}
@@ -237,7 +202,7 @@ export default class {
}
async createRemote({ name, options, proxy, url }) {
validateUrl(url)
validatePath(url)
const params = {
enabled: false,
@@ -254,10 +219,6 @@ export default class {
}
updateRemote(id, { enabled, name, options, proxy, url }) {
if (url !== undefined) {
validateUrl(url)
}
const handlers = this._handlers
const handler = handlers[id]
if (handler !== undefined) {
@@ -277,7 +238,7 @@ export default class {
@synchronized()
async _updateRemote(id, { url, ...props }) {
if (url !== undefined) {
validateUrl(url)
validatePath(url)
}
const remote = await this._getRemote(id)

View File

@@ -253,10 +253,6 @@ export default class RestApi {
const host = req.xapiObject
res.json(await host.$xapi.listMissingPatches(host))
},
async smt({ xapiObject }, res) {
res.json({ enabled: await xapiObject.$xapi.isHyperThreadingEnabled(xapiObject.$id) })
},
}
collections.pools.routes = {

View File

@@ -138,7 +138,7 @@ export class Range extends Component {
export Toggle from './toggle'
const UNITS = ['kiB', 'MiB', 'GiB', 'TiB', 'PiB']
const UNITS = ['kiB', 'MiB', 'GiB']
const DEFAULT_UNIT = 'GiB'
export class SizeInput extends BaseComponent {

View File

@@ -75,7 +75,7 @@ export const reportOnSupportPanel = async ({ files = [], formatMessage = identit
ADDITIONAL_FILES.map(({ fetch, name }) =>
timeout.call(fetch(), ADDITIONAL_FILES_FETCH_TIMEOUT).then(
file => formData.append('attachments', createBlobFromString(file), name),
error => logger.warn(`cannot get ${name}`, { error })
error => logger.warn(`cannot get ${name}`, error)
)
)
)