Compare commits

41 Commits

Author SHA1 Message Date
Florent Beauchamp
26c9338f54 feat: data_destroy 2024-02-21 16:54:46 +00:00
Florent Beauchamp
4ae0e3912f fix: backup runs 2024-02-21 15:45:43 +00:00
Florent Beauchamp
c3571325c5 feat(backups): use CBT if selected 2024-02-21 14:54:41 +00:00
Florent Beauchamp
9d56ab2a00 feat: add UX 2024-02-21 14:54:41 +00:00
Florent Beauchamp
b3e163d090 remove broken import 2024-02-21 14:54:41 +00:00
Florent Beauchamp
1e0c411d5f feat(backups): destroy data of cbt enabled snapshots 2024-02-21 14:54:41 +00:00
Florent Beauchamp
d30b5950fc feat(backup): use cbt in exports incremental vm 2024-02-21 14:54:41 +00:00
Florent Beauchamp
98ec3f4c5e feat(xapi,vhd-lib): implement cbt for reading changed data 2024-02-21 14:54:41 +00:00
Florent Beauchamp
ff57bc2a0b feat(xapi): implement Change Block Tracking function 2024-02-21 14:54:39 +00:00
Florent Beauchamp
ee0fd9ab8e feat(nbd-client): implement buffer passthrough in read block 2024-02-21 14:54:12 +00:00
Mathieu
039d5687c0 fix(xo-server/host): fix false positives when restarting host after updates (#7366)
The previous implementation only considered version upgrades and did not take into account the installation of missing patches.

See zammad#21487
Introduced by 85ec261
2024-02-21 15:05:05 +01:00
Florent Beauchamp
b89195eb80 fix(backups/IncrementalRemote): ensure chaining is ok and mutualize code with IncrementalXapi 2024-02-21 10:27:56 +01:00
Florent Beauchamp
822cdc3fb8 refactor(backups/IncrementalRemoteWriter): reuse parent path from checkBaseVdis 2024-02-21 10:27:56 +01:00
Florent Beauchamp
c7b5b715a3 refactor(backups/checkBaseVdi): use uuid, don't check vhd multiple times 2024-02-21 10:27:56 +01:00
Florent Beauchamp
56b427c09c fix(vhd-lib/VhdSynthetic): compression type computation 2024-02-21 10:27:56 +01:00
Mathieu
0e45c52bbc feat(lite/xapi-stats): handle new format (#7383)
Similar to 757a8915d9

Starting from XAPI 23.31, stats are in valid JSON but numbers are encoded as strings.
2024-02-20 17:55:57 +01:00
Mathieu
4fd2b91fc4 feat(xo-web/SizeInput): added 'TiB' and 'PiB' units (#7382) 2024-02-20 17:43:21 +01:00
Florent BEAUCHAMP
7890320a7d fix(xo-server/import): error during import of last snapshot of running VM (#7370)
From zammad#21710

Introduced by 2d047c4fef
2024-02-20 17:39:39 +01:00
Julien Fontanet
1718649e0c feat(xo-server/vm.$container): points to host if VDI on local SR
Fixes https://xcp-ng.org/forum/post/71769
2024-02-20 16:49:53 +01:00
Julien Fontanet
7fc5d62ca9 feat(xo-server/rest-api): export hosts' SMT status
Fixes https://xcp-ng.org/forum/post/71374
2024-02-20 16:33:33 +01:00
Julien Fontanet
eedaca0195 feat(xo-server/remotes): detect, log and fix incorrect params (#7343) 2024-02-16 16:23:06 +01:00
Julien Fontanet
9ffa52cc01 docs(xoa): manual network config 2024-02-16 11:25:34 +01:00
Julien Fontanet
e9a23755b6 test(fs/path/normalizePath): test relative paths handling
Related to 5712f29a5
2024-02-15 10:10:44 +01:00
Julien Fontanet
5712f29a58 fix(vhd-lib/chainVhd): correctly handle relative paths 2024-02-15 09:14:32 +01:00
Julien Fontanet
509ebf900e fix(fs/path/relativeFromFile): correctly handle relative paths 2024-02-15 09:13:10 +01:00
Julien Fontanet
757a8915d9 feat(xo-server/xapi-stats): handle new format
Starting from XAPI 23.31, stats are in valid JSON but numbers are encoded as strings.
2024-02-14 16:14:43 +01:00
Thierry Goettelmann
35c660dbf6 feat(xo-stack): add @core alias to import Core from Web and Lite (#7375) 2024-02-14 14:43:23 +01:00
Julien Fontanet
f23fd69e7e fix(xapi/VIF_create): fetch power_state and MTU in parallel 2024-02-14 11:48:07 +01:00
Julien Fontanet
39c10a7197 fix(xapi/VIF_create): explicit error when no allowed devices
Related to #7380
2024-02-14 11:48:07 +01:00
Julien Fontanet
7a1bc16468 fix: respect logger method signature
This is a minor fix that should not have major impacts.

It's not necessary to release impacted packages.
2024-02-13 17:38:03 +01:00
Julien Fontanet
93dd1a63da docs(log): document method signature 2024-02-13 17:35:58 +01:00
Florent Beauchamp
b4e1064914 fix(backups): _isAlreadyTransferred is async
This leads to a retransfer and an EEXIST error while writing the metadata.

It can happen when a mirror transfer to multiple remotes fails on one remote and is restarted/resumed.
2024-02-13 16:03:45 +01:00
Florent Beauchamp
810cdc1a77 fix(backups): really skip already transferred backups 2024-02-13 16:03:45 +01:00
Julien Fontanet
1023131828 chore: update dev deps 2024-02-12 20:47:05 +01:00
Smultar
e2d83324ac chore: add name and version to root package.json (#7372)
Fixes #7371
2024-02-12 16:59:50 +01:00
Julien Fontanet
7cea445c21 fix(xo-web/remotes): don't merge all properties into url
Related to #7343

Introduced by fb1bf6a1e7
2024-02-12 14:51:04 +01:00
Julien Fontanet
b5d9d9a9e1 fix(xo-server-audit): ignore tag.getAllConfigured
Introduced by 25e270edb4
2024-02-12 10:58:06 +01:00
Julien Fontanet
3a4e9b8f8e chore(xo-web/config): remove unused computeds
Introduced by 01302d7a60
2024-02-12 10:55:58 +01:00
Julien Fontanet
92efd28b33 fix(xo-web/config): sort backups from newest to oldest
Introduced by 01302d7a60
2024-02-12 10:55:26 +01:00
Julien Fontanet
a2c36c0832 feat(xo-server): add robots.txt
Fixes zammad#21489
2024-02-09 11:25:06 +01:00
Florent BEAUCHAMP
2eb49cfdf1 feat: release 5.91.2 (#7367) 2024-02-09 11:10:59 +01:00
55 changed files with 1951 additions and 1401 deletions

View File

@@ -65,10 +65,11 @@ module.exports = {
typescript: true,
'eslint-import-resolver-custom-alias': {
alias: {
'@core': '../web-core/lib',
'@': './src',
},
extensions: ['.ts'],
packages: ['@xen-orchestra/lite'],
packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
},
},
},

View File

@@ -61,22 +61,23 @@ export default class MultiNbdClient {
async *readBlocks(indexGenerator) {
// default : read all blocks
const readAhead = []
const makeReadBlockPromise = (index, size) => {
const promise = this.readBlock(index, size)
const makeReadBlockPromise = (index, buffer, size) => {
// pass through any pre loaded buffer
const promise = buffer ? Promise.resolve(buffer) : this.readBlock(index, size)
// error is handled during unshift
promise.catch(() => {})
return promise
}
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
for (const { index, size } of indexGenerator()) {
for (const { index, buffer, size } of indexGenerator()) {
// stack readAheadMaxLength promises before starting to handle the results
if (readAhead.length === this.#readAhead) {
// any error will stop reading blocks
yield readAhead.shift()
}
readAhead.push(makeReadBlockPromise(index, size))
readAhead.push(makeReadBlockPromise(index, buffer, size))
}
while (readAhead.length > 0) {
yield readAhead.shift()
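
A hypothetical usage sketch of the new `buffer` passthrough (the `client` and `preloadedBuffer` names are assumptions, not part of this changeset): when the generator yields a pre-loaded buffer, `makeReadBlockPromise` resolves it directly instead of issuing an NBD read.

```js
// read blocks 0 and 1 over NBD, but pass block 2 through as-is because
// its content is already known (e.g. an unchanged block skipped via CBT)
const blocks = client.readBlocks(function* () {
  yield { index: 0, size: 2 * 1024 * 1024 }
  yield { index: 1, size: 2 * 1024 * 1024 }
  yield { index: 2, buffer: preloadedBuffer } // no NBD round-trip
})
for await (const block of blocks) {
  // each iteration yields a Buffer, in the order the generator produced them
}
```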

View File

@@ -160,10 +160,10 @@ export class ImportVmBackup {
// update the stream with the negative vhd stream
stream = await negativeVhd.stream()
vdis[vdiRef].baseVdi = snapshotCandidate
} catch (err) {
} catch (error) {
// can be a broken VHD chain, a vhd chain with a key backup, ....
// not an irrecoverable error: don't dispose parentVhd, and fall back to full restore
warn(`can't use differential restore`, err)
warn(`can't use differential restore`, { error })
disposableDescendants?.dispose()
}
}

View File

@@ -191,13 +191,14 @@ export class RemoteAdapter {
// check if we will be allowed to merge a vhd created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(openVhd(this.handler, path), vhd => {
return await Disposable.use(VhdSynthetic.fromVhdChain(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
const isVhdDirectory = vhd instanceof VhdDirectory
// check if all the chain is composed of vhd directory
const isVhdDirectory = vhd.checkVhdsClass(VhdDirectory)
return isVhdDirectory
? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.useVhdDirectory()

View File

@@ -79,9 +79,16 @@ export async function exportIncrementalVm(
$SR$uuid: vdi.$SR.uuid,
}
let changedBlocks
console.log('CBT ? ', vdi.cbt_enabled,vdiRef,baseVdi?.$ref)
if (vdi.cbt_enabled && baseVdi?.$ref) {
// @todo log errors and fallback to default mode
changedBlocks = await vdi.$listChangedBlock(baseVdi?.$ref)
}
streams[`${vdiRef}.vhd`] = await vdi.$exportContent({
baseRef: baseVdi?.$ref,
cancelToken,
changedBlocks,
format: 'vhd',
nbdConcurrency,
preferNbd,

View File

@@ -2,6 +2,7 @@ import { asyncEach } from '@vates/async-each'
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import assert from 'node:assert'
import * as UUID from 'uuid'
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
import mapValues from 'lodash/mapValues.js'
@@ -9,11 +10,48 @@ import { AbstractRemote } from './_AbstractRemote.mjs'
import { forkDeltaExport } from './_forkDeltaExport.mjs'
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
import { Task } from '../../Task.mjs'
import { Disposable } from 'promise-toolbox'
import { openVhd } from 'vhd-lib'
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
_getRemoteWriter() {
return IncrementalRemoteWriter
}
async _selectBaseVm(metadata) {
// for each disk, get the parent
const baseUuidToSrcVdi = new Map()
// no previous backup for a base (= key) backup
if (metadata.isBase) {
return
}
await asyncEach(Object.entries(metadata.vdis), async ([id, vdi]) => {
const isDifferencing = metadata.isVhdDifferencing[`${id}.vhd`]
if (isDifferencing) {
const vmDir = getVmBackupDir(metadata.vm.uuid)
const path = `${vmDir}/${metadata.vhds[id]}`
// don't catch errors: we can't recover if the source vhds are missing
await Disposable.use(openVhd(this._sourceRemoteAdapter._handler, path), vhd => {
baseUuidToSrcVdi.set(UUID.stringify(vhd.header.parentUuid), vdi.$snapshot_of$uuid)
})
}
})
const presentBaseVdis = new Map(baseUuidToSrcVdi)
await this._callWriters(
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis),
'writer.checkBaseVdis()',
false
)
// check that the parent vdis are present on all the remotes
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
throw new Error(`Missing vdi ${baseUuid} which is a base for a delta`)
}
})
// yeah, let's go
}
async _run($defer) {
const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
await this._callWriters(async writer => {
@@ -26,7 +64,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
if (transferList.length > 0) {
for (const metadata of transferList) {
assert.strictEqual(metadata.mode, 'delta')
await this._selectBaseVm(metadata)
await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
useChain: false,
@@ -50,6 +88,17 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
}),
'writer.transfer()'
)
// this will update parent name with the needed alias
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp: metadata.timestamp,
vdis: incrementalExport.vdis,
}),
'writer.updateUuidAndChain()'
)
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
// for healthcheck
this._tags = metadata.vm.tags

View File

@@ -78,6 +78,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
'writer.transfer()'
)
// we want to control the uuid of the vhd in the chain
// and ensure they are correctly chained
await this._callWriters(
writer =>
writer.updateUuidAndChain({
isVhdDifferencing,
timestamp,
vdis: deltaExport.vdis,
}),
'writer.updateUuidAndChain()'
)
this._baseVm = exportedVm
if (baseVm !== undefined) {
@@ -133,7 +145,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
])
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(baseUuid, srcVdi)
baseUuidToSrcVdi.set(baseUuid, srcVdi.uuid)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
@@ -154,18 +166,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
vdi: srcVdiUuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
vdi: srcVdiUuid,
})
fullVdisRequired.add(srcVdi.uuid)
fullVdisRequired.add(srcVdiUuid)
}
})

View File

@@ -193,6 +193,17 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
const allSettings = this.job.settings
const baseSettings = this._baseSettings
const baseVmRef = this._baseVm?.$ref
if (this._settings.deltaComputeMode === 'CBT' && this._exportedVm?.$ref && this._exportedVm?.$ref != this._vm.$ref) {
console.log('WILL PURGE',this._exportedVm?.$ref)
const xapi = this._xapi
const vdiRefs = await this._xapi.VM_getDisks(this._exportedVm?.$ref)
await xapi.call('VM.destroy',this._exportedVm.$ref)
// @todo: ensure it is really the snapshot
for (const vdiRef of vdiRefs) {
// @todo handle error
await xapi.VDI_dataDestroy(vdiRef)
}
}
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
@@ -208,6 +219,8 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
}
})
})
}
async copy() {
@@ -226,6 +239,22 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
throw new Error('Not implemented')
}
async enableCbt() {
// for each disk of the VM, enable CBT
if (this._settings.deltaComputeMode !== 'CBT') {
return
}
const vm = this._vm
const xapi = this._xapi
console.log(vm.VBDs)
const vdiRefs = await vm.$getDisks(vm.VBDs)
for (const vdiRef of vdiRefs) {
// @todo handle error
await xapi.VDI_enableChangeBlockTracking(vdiRef)
}
// @todo: when do we disable CBT?
}
async run($defer) {
const settings = this._settings
assert(
@@ -246,7 +275,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
await this._cleanMetadata()
await this._removeUnusedSnapshots()
await this.enableCbt()
const vm = this._vm
const isRunning = vm.power_state === 'Running'
const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
@@ -267,6 +296,7 @@ export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
await this._exportedVm.update_blocked_operations({ pool_migrate: reason, migrate_send: reason })
try {
await this._copy()
// @todo if CBT is enabled : should call vdi.datadestroy on snapshot here
} finally {
await this._exportedVm.update_blocked_operations({ pool_migrate, migrate_send })
}

View File

@@ -1,17 +1,15 @@
import assert from 'node:assert'
import mapValues from 'lodash/mapValues.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { asyncEach } from '@vates/async-each'
import { asyncMap } from '@xen-orchestra/async-map'
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
import { chainVhd, openVhd } from 'vhd-lib'
import { createLogger } from '@xen-orchestra/log'
import { decorateClass } from '@vates/decorate-with'
import { defer } from 'golike-defer'
import { dirname } from 'node:path'
import { dirname, basename } from 'node:path'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
@@ -23,42 +21,45 @@ import { Disposable } from 'promise-toolbox'
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
#parentVdiPaths
#vhds
async checkBaseVdis(baseUuidToSrcVdi) {
this.#parentVdiPaths = {}
const { handler } = this._adapter
const adapter = this._adapter
const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdiUuid]) => {
let parentDestPath
const vhdDir = `${vdisDir}/${srcVdiUuid}`
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
const vhds = await handler.list(vhdDir, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
await asyncMap(vhds, async path => {
try {
await checkVhdChain(handler, path)
// Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the checks of a path are done in parallel, found would be containing
// only the last answer of isMergeableParent which is probably not the right one
// this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300
// the last one is probably the right one
const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
for (let i = vhds.length - 1; i >= 0 && parentDestPath === undefined; i--) {
const path = vhds[i]
try {
if (await adapter.isMergeableParent(packedBaseUuid, path)) {
parentDestPath = path
}
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
}
})
}
} catch (error) {
warn('checkBaseVdis', { error })
}
if (!found) {
// no usable parent => the runner will have to decide to fall back to a full or stop backup
if (parentDestPath === undefined) {
baseUuidToSrcVdi.delete(baseUuid)
} else {
this.#parentVdiPaths[vhdDir] = parentDestPath
}
})
}
@@ -123,6 +124,44 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
}
}
async updateUuidAndChain({ isVhdDifferencing, vdis }) {
assert.notStrictEqual(
this.#vhds,
undefined,
'_transfer must be called before updateUuidAndChain for incremental backups'
)
const parentVdiPaths = this.#parentVdiPaths
const { handler } = this._adapter
const vhds = this.#vhds
await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
const path = `${this._vmBackupDir}/${vhds[id]}`
if (isDifferencing) {
assert.notStrictEqual(
parentVdiPaths,
undefined,
'checkBaseVdis must be called before updateUuidAndChain for incremental backups'
)
const parentPath = parentVdiPaths[dirname(path)]
// we are in an incremental backup
// we already computed the chain in checkBaseVdis
assert.notStrictEqual(parentPath, undefined, 'A differential VHD must have a parent')
// forbid any kind of loop
assert.ok(basename(parentPath) < basename(path), `vhd must be sorted to be chained`)
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD if needed
await Disposable.use(openVhd(handler, path), async vhd => {
if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
}
})
})
}
async _deleteOldEntries() {
const adapter = this._adapter
const oldEntries = this._oldEntries
@@ -141,14 +180,10 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
const jobId = job.id
const handler = adapter.handler
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// @todo : should skip backup while being vigilant to not stuck the forked stream
Task.info('This backup has already been transfered')
}
const basename = formatFilenameDate(timestamp)
const vhds = mapValues(
// update this.#vhds before potentially skipping the transfer, so that
// updateUuidAndChain has all the mandatory data
const vhds = (this.#vhds = mapValues(
deltaExport.vdis,
vdi =>
`vdis/${jobId}/${
@@ -158,7 +193,15 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vdi.uuid
: vdi.$snapshot_of$uuid
}/${adapter.getVhdFileName(basename)}`
)
))
let metadataContent = await this._isAlreadyTransferred(timestamp)
if (metadataContent !== undefined) {
// skip the backup while being careful not to stall the forked stream
Task.info('This backup has already been transferred')
Object.values(deltaExport.streams).forEach(stream => stream.destroy())
return { size: 0 }
}
metadataContent = {
isVhdDifferencing,
@@ -174,38 +217,13 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
vm,
vmSnapshot,
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
let transferSize = 0
await asyncEach(
Object.entries(deltaExport.vdis),
async ([id, vdi]) => {
Object.keys(deltaExport.vdis),
async id => {
const path = `${this._vmBackupDir}/${vhds[id]}`
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
let parentPath
if (isDifferencing) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
prependDir: true,
})
)
.sort()
.pop()
assert.notStrictEqual(
parentPath,
undefined,
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
)
parentPath = parentPath.slice(1) // remove leading slash
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
// don't write it as transferSize += await async function
// since i += await asyncFun leads to a race condition
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
@@ -217,17 +235,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
writeBlockConcurrency: this._config.writeBlockConcurrency,
})
transferSize += transferSizeOneDisk
if (isDifferencing) {
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
},
{
concurrency: settings.diskPerVmConcurrency,
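
The `basename(parentPath) < basename(path)` assertion in `updateUuidAndChain` above relies on VHD file names being lexicographically sortable in chronological order. A sketch, assuming `formatFilenameDate` produces timestamp-derived names like the ones below:

```js
// timestamp-derived names sort the same way lexicographically and chronologically,
// so a parent VHD always compares lower than its child
const parent = '20240213T160345Z.vhd' // illustrative file names
const child = '20240221T102756Z.vhd'
console.log(parent < child) // true: chaining child onto parent is allowed
```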

View File

@@ -1,3 +1,4 @@
import assert from 'node:assert'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { formatDateTime } from '@xen-orchestra/xapi'
@@ -14,6 +15,7 @@ import find from 'lodash/find.js'
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
assert.notStrictEqual(baseVm, undefined)
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
@@ -36,7 +38,9 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
}
}
}
updateUuidAndChain() {
// nothing to do, the chaining is not modified in this case
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({

View File

@@ -5,6 +5,10 @@ export class AbstractIncrementalWriter extends AbstractWriter {
throw new Error('Not implemented')
}
updateUuidAndChain() {
throw new Error('Not implemented')
}
cleanup() {
throw new Error('Not implemented')
}

View File

@@ -113,13 +113,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
)
}
_isAlreadyTransferred(timestamp) {
async _isAlreadyTransferred(timestamp) {
const vmUuid = this._vmUuid
const adapter = this._adapter
const backupDir = getVmBackupDir(vmUuid)
try {
const actualMetadata = JSON.parse(
adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
)
return actualMetadata
} catch (error) {}

View File

@@ -230,6 +230,7 @@ Settings are described in [`@xen-orchestra/backups/\_runners/VmsXapi.mjs`](http
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `updateUuidAndChain({ isVhdDifferencing, vdis })`
- `cleanup()`
- `healthCheck()` // not executed if there is no health check SR or the tag doesn't match
- **Full**

View File

@@ -20,5 +20,7 @@ export function split(path) {
return parts
}
export const relativeFromFile = (file, path) => relative(dirname(file), path)
// paths are made absolute otherwise fs.relative() would resolve them against working directory
export const relativeFromFile = (file, path) => relative(dirname(normalize(file)), normalize(path))
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
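
A sketch of the failure mode the normalization fixes; note that `normalize` here is this module's own helper, which roots paths at `/` (an assumption based on the tests added below), not `node:path.normalize`:

```js
import { dirname, relative } from 'node:path'

// without normalization, a relative `file` mixed with an absolute `path`
// makes relative() resolve against the current working directory:
relative(dirname('foo/bar/file.vhd'), '/foo/baz/path.vhd')
// => depends on process.cwd(), e.g. '../../../../foo/baz/path.vhd'

// with both arguments normalized to absolute paths, the result is stable:
// relativeFromFile('foo/bar/file.vhd', '/foo/baz/path.vhd') === '../baz/path.vhd'
```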

View File

@@ -0,0 +1,17 @@
import { describe, it } from 'test'
import { strict as assert } from 'assert'
import { relativeFromFile } from './path.js'
describe('relativeFromFile()', function () {
for (const [title, args] of Object.entries({
'file absolute and path absolute': ['/foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file relative and path absolute': ['foo/bar/file.vhd', '/foo/baz/path.vhd'],
'file absolute and path relative': ['/foo/bar/file.vhd', 'foo/baz/path.vhd'],
'file relative and path relative': ['foo/bar/file.vhd', 'foo/baz/path.vhd'],
})) {
it('works with ' + title, function () {
assert.equal(relativeFromFile(...args), '../baz/path.vhd')
})
}
})

View File

@@ -54,10 +54,10 @@ async function handleExistingFile(root, indexPath, path) {
await indexFile(fullPath, indexPath)
}
}
} catch (err) {
if (err.code !== 'EEXIST') {
} catch (error) {
if (error.code !== 'EEXIST') {
// there can be a symbolic link in the tree
warn('handleExistingFile', err)
warn('handleExistingFile', { error })
}
}
}
@@ -106,7 +106,7 @@ export async function watchRemote(remoteId, { root, immutabilityDuration, rebuil
await File.liftImmutability(settingPath)
} catch (error) {
// file may not exists, and it's not really a problem
info('lifting immutability on current settings', error)
info('lifting immutability on current settings', { error })
}
await fs.writeFile(
settingPath,

View File

@@ -50,7 +50,17 @@ const RRD_POINTS_PER_STEP: { [key in RRD_STEP]: number } = {
// Utils
// -------------------------------------------------------------------
function convertNanToNull(value: number) {
function parseNumber(value: number | string) {
// Starting from XAPI 23.31, numbers in the JSON payload are encoded as
// strings to support NaN, Infinity and -Infinity
if (typeof value === 'string') {
const asNumber = +value
if (isNaN(asNumber) && value !== 'NaN') {
throw new Error('cannot parse number: ' + value)
}
value = asNumber
}
return isNaN(value) ? null : value
}
@@ -59,7 +69,7 @@ function convertNanToNull(value: number) {
// -------------------------------------------------------------------
const computeValues = (dataRow: any, legendIndex: number, transformValue = identity) =>
map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))
const createGetProperty = (obj: object, property: string, defaultValue: unknown) =>
defaults(obj, { [property]: defaultValue })[property] as any
@@ -319,8 +329,14 @@ export default class XapiStats {
},
abortSignal,
})
// eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
return JSON5.parse(await resp.text())
const text = await resp.text()
try {
// starting from XAPI 23.31, the response is valid JSON
return JSON.parse(text)
} catch (error) {
// eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
return JSON5.parse(text)
}
}
// To avoid multiple requests, we keep a cache for the stats and
@@ -383,7 +399,10 @@ export default class XapiStats {
abortSignal,
})
const actualStep = json.meta.step as number
const actualStep = parseNumber(json.meta.step)
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
if (json.data.length > 0) {
// fetched data is organized from the newest to the oldest
@@ -407,14 +426,15 @@ export default class XapiStats {
let stepStats = xoObjectStats[actualStep]
let cacheStepStats = cacheXoObjectStats[actualStep]
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
const endTimestamp = parseNumber(json.meta.end)
if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
stepStats = xoObjectStats[actualStep] = {
endTimestamp: json.meta.end,
endTimestamp,
interval: actualStep,
canBeExpired: false,
}
cacheStepStats = cacheXoObjectStats[actualStep] = {
endTimestamp: json.meta.end,
endTimestamp,
interval: actualStep,
canBeExpired: true,
}
@@ -438,10 +458,6 @@ export default class XapiStats {
})
})
}
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
}
} catch (error) {
if (error instanceof Error && error.name === 'AbortError') {
return
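
The same `parseNumber` helper lands both in the lite (TypeScript) hunk above and in the xo-server hunk further down. A standalone restatement with sample inputs:

```js
function parseNumber(value) {
  // XAPI 23.31+ encodes numbers as strings to support NaN and ±Infinity
  if (typeof value === 'string') {
    const asNumber = +value
    if (isNaN(asNumber) && value !== 'NaN') {
      throw new Error('cannot parse number: ' + value)
    }
    value = asNumber
  }
  // NaN is normalized to null so the charts can skip the point
  return isNaN(value) ? null : value
}

parseNumber('3.14') // 3.14
parseNumber('Infinity') // Infinity
parseNumber('NaN') // null
parseNumber(42) // 42
parseNumber('foo') // throws: cannot parse number: foo
```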

View File

@@ -1,13 +1,15 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
"include": ["env.d.ts", "src/**/*", "src/**/*.vue", "../web-core/lib/**/*", "../web-core/lib/**/*.vue"],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"]
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
}
}
}

View File

@@ -23,6 +23,7 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},

View File

@@ -27,6 +27,16 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
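
Both call forms allowed by this signature, using the `createLogger` factory already present in this changeset (the namespace below is illustrative):

```js
import { createLogger } from '@xen-orchestra/log'

const { warn } = createLogger('xo:example')

warn(new Error('boom')) // (error): void
warn('could not join server', { error: new Error('ECONNREFUSED') }) // (message, data?)
```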
### Consumer
Then, at the application level, configure how the logs are handled:

View File

@@ -45,6 +45,16 @@ log.error('could not join server', {
})
```
A logging method has the following signature:
```ts
interface LoggingMethod {
(error): void
(message: string, data?: { error?: Error; [property: string]: any }): void
}
```
### Consumer
Then, at the application level, configure how the logs are handled:

View File

@@ -10,7 +10,8 @@
}
},
"devDependencies": {
"vue": "^3.4.13"
"vue": "^3.4.13",
"@vue/tsconfig": "^0.5.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/web-core",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
@@ -25,6 +26,6 @@
},
"license": "AGPL-3.0-or-later",
"engines": {
"node": ">=8.10"
"node": ">=18"
}
}

View File

@@ -0,0 +1,12 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "lib/**/*", "lib/**/*.vue"],
"exclude": ["lib/**/__tests__/*"],
"compilerOptions": {
"noEmit": true,
"baseUrl": ".",
"paths": {
"@core/*": ["./lib/*"]
}
}
}

View File

@@ -1,13 +1,22 @@
{
"extends": "@vue/tsconfig/tsconfig.dom.json",
"include": ["env.d.ts", "typed-router.d.ts", "src/**/*", "src/**/*.vue"],
"include": [
"env.d.ts",
"typed-router.d.ts",
"src/**/*",
"src/**/*.vue",
"../web-core/lib/**/*",
"../web-core/lib/**/*.vue"
],
"exclude": ["src/**/__tests__/*"],
"compilerOptions": {
"composite": true,
"noEmit": true,
"baseUrl": ".",
"rootDir": "..",
"paths": {
"@/*": ["./src/*"]
"@/*": ["./src/*"],
"@core/*": ["../web-core/lib/*"]
}
}
}

View File

@@ -11,6 +11,7 @@ export default defineConfig({
resolve: {
alias: {
'@': fileURLToPath(new URL('./src', import.meta.url)),
'@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
},
},
})

View File

@@ -83,9 +83,31 @@ class Vdi {
}
}
// return a buffer of 0/1 bits, showing whether the corresponding 64KB block
// in the raw vdi has changed
async listChangedBlock(ref, baseRef){
console.log('listchanged blocks', ref, baseRef)
const encoded = await this.call('VDI.list_changed_blocks', baseRef, ref)
console.log({encoded})
const buf = Buffer.from(encoded, 'base64')
console.log({buf})
return buf
}
async enableChangeBlockTracking(ref){
return this.call('VDI.enable_cbt', ref)
}
async disableChangeBlockTracking(ref){
return this.call('VDI.disable_cbt', ref)
}
async dataDestroy(ref){
return this.call('VDI.data_destroy', ref)
}
async exportContent(
ref,
{ baseRef, cancelToken = CancelToken.none, format, nbdConcurrency = 1, preferNbd = this._preferNbd }
{ baseRef, cancelToken = CancelToken.none, changedBlocks, format, nbdConcurrency = 1, preferNbd = this._preferNbd }
) {
const query = {
format,
@@ -114,7 +136,7 @@ class Vdi {
})
if (nbdClient !== undefined && format === VDI_FORMAT_VHD) {
const taskRef = await this.task_create(`Exporting content of VDI ${vdiName} using NBD`)
stream = await createNbdVhdStream(nbdClient, stream)
stream = await createNbdVhdStream(nbdClient, stream, {changedBlocks})
stream.on('progress', progress => this.call('task.set_progress', taskRef, progress))
finished(stream, () => this.task_destroy(taskRef))
}
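
A sketch of what the base64 bitmap returned by `listChangedBlock` encodes (the `xapi`, `ref` and `baseRef` names are assumptions); the bit order matches `cbtContainsBlock` in the vhd-lib stream hunk further down:

```js
const encoded = await xapi.call('VDI.list_changed_blocks', baseRef, ref)
const cbt = Buffer.from(encoded, 'base64')

// bit N tells whether the Nth 64KB block of the raw VDI changed
let changed = 0
for (let block = 0; block < cbt.length * 8; block++) {
  if (((cbt[block >> 3] >> (block & 7)) & 1) === 1) {
    changed++
  }
}
console.log(`${changed} x 64KB blocks to transfer`)
```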

View File

@@ -21,12 +21,23 @@ export default class Vif {
MAC = '',
} = {}
) {
if (device === undefined) {
const allowedDevices = await this.call('VM.get_allowed_VIF_devices', VM)
if (allowedDevices.length === 0) {
const error = new Error('could not find an allowed VIF device')
error.poolUuid = this.pool.uuid
error.vmRef = VM
throw error
}
device = allowedDevices[0]
}
const [powerState, ...rest] = await Promise.all([
this.getField('VM', VM, 'power_state'),
device ?? (await this.call('VM.get_allowed_VIF_devices', VM))[0],
MTU ?? (await this.getField('network', network, 'MTU')),
MTU ?? this.getField('network', network, 'MTU'),
])
;[device, MTU] = rest
;[MTU] = rest
const vifRef = await this.call('VIF.create', {
currently_attached: powerState === 'Suspended' ? currently_attached : undefined,

View File

@@ -1,6 +1,8 @@
# ChangeLog
## **next**
## **5.91.2** (2024-02-09)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Enhancements
@@ -39,8 +41,6 @@
## **5.91.0** (2024-01-31)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [Import/VMWare] Speed up import and make all imports thin [#7323](https://github.com/vatesfr/xen-orchestra/issues/7323)

View File

@@ -7,12 +7,23 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Self service] From user POV, show used resources even when they are unlimited (PR [#7353](https://github.com/vatesfr/xen-orchestra/pull/7353))
- Disable search engine indexing via a `robots.txt`
- [Stats] Support format used by XAPI 23.31
- [REST API] Export host [SMT](https://en.wikipedia.org/wiki/Simultaneous_multithreading) status at `/hosts/:id/smt` [Forum#71374](https://xcp-ng.org/forum/post/71374)
- [Home & REST API] `$container` field of a halted VM now points to a host if a VDI is on local storage [Forum#71769](https://xcp-ng.org/forum/post/71769)
- [Size Input] Ability to select two new units in the dropdown (`TiB`, `PiB`) (PR [#7382](https://github.com/vatesfr/xen-orchestra/pull/7382))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Settings/XO Config] Sort backups from newest to oldest
- [Plugins/audit] Don't log `tag.getAllConfigured` calls
- [Remotes] Correctly clear error when the remote is tested with success
- [Import/VMWare] Fix importing last snapshot (PR [#7370](https://github.com/vatesfr/xen-orchestra/pull/7370))
- [Host/Reboot] Fix false positive warning when restarting a host after updates (PR [#7366](https://github.com/vatesfr/xen-orchestra/pull/7366))
### Packages to release
> When modifying a package, add it here with its release type.
@@ -29,7 +40,13 @@
<!--packages-start-->
- @xen-orchestra/backups patch
- @xen-orchestra/fs patch
- @xen-orchestra/xapi minor
- @vates/nbd-client minor
- vhd-lib patch
- xo-server minor
- xo-server-audit patch
- xo-web minor
<!--packages-end-->

View File

@@ -93,6 +93,21 @@ Follow the instructions:
You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
If you want to use a static IP address for your appliance:
```sh
xe vm-param-set uuid="$uuid" \
xenstore-data:vm-data/ip="$ip" \
xenstore-data:vm-data/netmask="$netmask" \
xenstore-data:vm-data/gateway="$gateway"
```
If you want to replace the default DNS server:
```sh
xe vm-param-set uuid="$uuid" xenstore-data:vm-data/dns="$dns"
```
After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
## First console connection

View File

@@ -1,4 +1,6 @@
{
"name": "xen-orchestra",
"version": "0.0.0",
"devDependencies": {
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
@@ -94,7 +96,7 @@
},
"private": true,
"scripts": {
"build": "turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
"build": "TURBO_TELEMETRY_DISABLED=1 turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
"build:xo-lite": "turbo run build --scope @xen-orchestra/lite",
"clean": "scripts/run-script.js --parallel clean",
"dev": "scripts/run-script.js --parallel --concurrency 0 --verbose dev",

View File

@@ -46,9 +46,9 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}
get compressionType() {
const compressionType = this.vhds[0].compressionType
for (let i = 0; i < this.vhds.length; i++) {
if (compressionType !== this.vhds[i].compressionType) {
const compressionType = this.#vhds[0].compressionType
for (let i = 0; i < this.#vhds.length; i++) {
if (compressionType !== this.#vhds[i].compressionType) {
return 'MIXED'
}
}

View File

@@ -1,6 +1,6 @@
'use strict'
const { dirname, relative } = require('path')
const { relativeFromFile } = require('@xen-orchestra/fs/path')
const { openVhd } = require('./openVhd')
const { DISK_TYPES } = require('./_constants')
@@ -21,7 +21,7 @@ module.exports = async function chain(parentHandler, parentPath, childHandler, c
}
await childVhd.readBlockAllocationTable()
const parentName = relative(dirname(childPath), parentPath)
const parentName = relativeFromFile(childPath, parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
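
Illustrative values for the new `parentName` computation (the paths are hypothetical):

```js
// same directory: the parent locator is a bare file name
relativeFromFile('/backups/vm/child.vhd', '/backups/vm/parent.vhd')
// => 'parent.vhd'

// sibling directories: a stable relative path, regardless of process.cwd()
relativeFromFile('/backups/vm1/child.vhd', '/backups/vm2/parent.vhd')
// => '../vm2/parent.vhd'
```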

View File

@@ -14,6 +14,7 @@ const {
const { fuHeader, checksumStruct } = require('./_structs')
const assert = require('node:assert')
const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
const MAX_DURATION_BETWEEN_PROGRESS_EMIT = 5e3
const MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT = 1
@@ -34,10 +35,42 @@ exports.createNbdRawStream = function createRawStream(nbdClient) {
return stream
}
function batContainsBlock(bat, blockId) {
const entry = bat.readUInt32BE(blockId * 4)
if (entry !== BLOCK_UNUSED) {
return [{ blockId, size: DEFAULT_BLOCK_SIZE }]
}
}
// one 2MB VHD block is made of 32 blocks of 64KB,
// i.e. each VHD block maps to 32 bits of the CBT bitmap
const EMPTY_NBD_BUFFER = Buffer.alloc(NBD_DEFAULT_BLOCK_SIZE, 0)
function cbtContainsBlock(cbt, blockId) {
const subBlocks = []
let hasOne = false
for (let i = 0; i < 32; i++) {
const position = blockId * 32 + i
const bitOffset = position & 7 // in byte
const byteIndex = position >> 3 // in buffer
const bit = (cbt[byteIndex] >> bitOffset) & 1
if (bit === 1) {
console.log('CBT contains block', blockId)
console.log({position,bitOffset,byteIndex, cbt:cbt[byteIndex],bit})
subBlocks.push({ blockId: position, size: NBD_DEFAULT_BLOCK_SIZE })
hasOne = true
} else {
// don't read empty blocks
subBlocks.push({ buffer: EMPTY_NBD_BUFFER })
}
}
if (hasOne) {
return subBlocks
}
}
exports.createNbdVhdStream = async function createVhdStream(
nbdClient,
sourceStream,
{
changedBlocks,
maxDurationBetweenProgressEmit = MAX_DURATION_BETWEEN_PROGRESS_EMIT,
minTresholdPercentBetweenProgressEmit = MIN_TRESHOLD_PERCENT_BETWEEN_PROGRESS_EMIT,
} = {}
@@ -51,7 +84,10 @@ exports.createNbdVhdStream = async function createVhdStream(
await skipStrict(sourceStream, header.tableOffset - (FOOTER_SIZE + HEADER_SIZE))
// new table offset
header.tableOffset = FOOTER_SIZE + HEADER_SIZE
const streamBat = await readChunkStrict(sourceStream, batSize)
let streamBat
if (changedBlocks === undefined) {
streamBat = await readChunkStrict(sourceStream, batSize)
}
let offset = FOOTER_SIZE + HEADER_SIZE + batSize
// check if parentlocator are ordered
let precLocator = 0
@@ -79,14 +115,14 @@ exports.createNbdVhdStream = async function createVhdStream(
// compute a BAT with the position that the block will have in the resulting stream
// blocks starts directly after parent locator entries
const entries = []
for (let i = 0; i < header.maxTableEntries; i++) {
const entry = streamBat.readUInt32BE(i * 4)
if (entry !== BLOCK_UNUSED) {
bat.writeUInt32BE(offsetSector, i * 4)
entries.push(i)
for (let blockId = 0; blockId < header.maxTableEntries; blockId++) {
const subBlocks = changedBlocks ? cbtContainsBlock(changedBlocks, blockId) : batContainsBlock(streamBat, blockId)
if (subBlocks !== undefined) {
bat.writeUInt32BE(offsetSector, blockId * 4)
entries.push({ blockId, subBlocks })
offsetSector += blockSizeInSectors
} else {
bat.writeUInt32BE(BLOCK_UNUSED, i * 4)
bat.writeUInt32BE(BLOCK_UNUSED, blockId * 4)
}
}
@@ -137,8 +173,10 @@ exports.createNbdVhdStream = async function createVhdStream(
// yield blocks from nbd
const nbdIterator = nbdClient.readBlocks(function* () {
for (const entry of entries) {
yield { index: entry, size: DEFAULT_BLOCK_SIZE }
for (const { subBlocks } of entries) {
for (const { blockId, buffer, size } of subBlocks) {
yield { index: blockId, buffer, size }
}
}
})
const bitmap = Buffer.alloc(SECTOR_SIZE, 255)
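
A worked example of the sub-block expansion above, with an illustrative bitmap in which only one 64KB sub-block of VHD block 0 has changed:

```js
// VHD block 0 covers CBT bits 0..31; set only bit 3
const cbt = Buffer.alloc(8)
cbt[0] = 0b00001000

const subBlocks = cbtContainsBlock(cbt, 0)
// subBlocks[3] => { blockId: 3, size: 64 * 1024 } (fetched over NBD)
// all other entries => { buffer: EMPTY_NBD_BUFFER } (emitted without a read)
// cbtContainsBlock(cbt, 1) => undefined: block 1 is unchanged and left out of the BAT
```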

View File

@@ -72,6 +72,7 @@ const DEFAULT_BLOCKED_LIST = {
'system.getServerTimezone': true,
'system.getServerVersion': true,
'system.getVersion': true,
'tag.getAllConfigured': true,
'test.getPermissionsForUser': true,
'user.getAll': true,
'user.getAuthenticationTokens': true,

View File

@@ -143,6 +143,7 @@ port = 80
requestTimeout = 0
[http.mounts]
'/robots.txt' = './robots.txt'
'/' = '../xo-web/dist/'
'/v6' = '../../@xen-orchestra/web/dist/'

View File

@@ -0,0 +1,2 @@
User-agent: *
Disallow: /

View File

@@ -27,7 +27,7 @@ async function sendToNagios(app, jobName, vmBackupInfo) {
jobName
)
} catch (error) {
warn('sendToNagios:', error)
warn('sendToNagios:', { error })
}
}

View File

@@ -1,3 +1,4 @@
import semver from 'semver'
import { createLogger } from '@xen-orchestra/log'
import assert from 'assert'
import { format } from 'json-rpc-peer'
@@ -136,13 +137,38 @@ export async function restart({
const pool = this.getObject(host.$poolId, 'pool')
const master = this.getObject(pool.master, 'host')
const hostRebootRequired = host.rebootRequired
if (hostRebootRequired && host.id !== master.id && host.version === master.version) {
throw incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})
// we are currently in a host upgrade process
if (hostRebootRequired && host.id !== master.id) {
// this error is not ideal but it means that the pool master must be fully upgraded/rebooted before the current host can be rebooted.
//
// there is a single error for the 3 cases because the client must handle them the same way
const throwError = () =>
incorrectState({
actual: hostRebootRequired,
expected: false,
object: master.id,
property: 'rebootRequired',
})
if (semver.lt(master.version, host.version)) {
log.error(`master version (${master.version}) is older than the host version (${host.version})`, {
masterId: master.id,
hostId: host.id,
})
throwError()
}
if (semver.eq(master.version, host.version)) {
if ((await this.getXapi(host).listMissingPatches(master._xapiId)).length > 0) {
log.error('master has missing patches', { masterId: master.id })
throwError()
}
if (master.rebootRequired) {
log.error('master needs to reboot')
throwError()
}
}
}
}

View File

@@ -328,6 +328,34 @@ const TRANSFORMS = {
const { creation } = xoData.extract(obj) ?? {}
let $container
if (obj.resident_on !== 'OpaqueRef:NULL') {
// resident_on is set when the VM is running (or paused or suspended on a host)
$container = link(obj, 'resident_on')
} else {
// if the VM is halted, the $container is the pool
$container = link(obj, 'pool')
// unless one of its VDIs is on a non-shared SR
//
// linked objects may not be there when this code runs, and since it is only
// refreshed when the VM XAPI record changes, this value is not guaranteed
// to be up-to-date, but in practice it appears to work fine thanks to
// `VBDs` and `current_operations` changing when a VDI is
// added/removed/migrated
for (const vbd of obj.$VBDs) {
const sr = vbd?.$VDI?.$SR
if (sr !== undefined && !sr.shared) {
const pbd = sr.$PBDs[0]
const hostId = pbd && link(pbd, 'host')
if (hostId !== undefined) {
$container = hostId
break
}
}
}
}
const vm = {
// type is redefined after for controllers/, templates &
// snapshots.
@@ -422,8 +450,7 @@ const TRANSFORMS = {
xenTools,
...getVmGuestToolsProps(obj),
// TODO: handle local VMs (`VM.get_possible_hosts()`).
$container: isRunning ? link(obj, 'resident_on') : link(obj, 'pool'),
$container,
$VBDs: link(obj, 'VBDs'),
// TODO: dedupe
@@ -588,7 +615,7 @@ const TRANSFORMS = {
vdi(obj) {
const vdi = {
type: 'VDI',
cbt_enabled: obj.cbt_enabled,
missing: obj.missing,
name_description: obj.name_description,
name_label: obj.name_label,

View File

@@ -45,7 +45,17 @@ const RRD_POINTS_PER_STEP = {
// Utils
// -------------------------------------------------------------------
function convertNanToNull(value) {
function parseNumber(value) {
// Starting from XAPI 23.31, numbers in the JSON payload are encoded as
// strings to support NaN, Infinity and -Infinity
if (typeof value === 'string') {
const asNumber = +value
if (isNaN(asNumber) && value !== 'NaN') {
throw new Error('cannot parse number: ' + value)
}
value = asNumber
}
return isNaN(value) ? null : value
}
@@ -58,7 +68,7 @@ async function getServerTimestamp(xapi, hostRef) {
// -------------------------------------------------------------------
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))
const combineStats = (stats, path, combineValues) => zipWith(...map(stats, path), (...values) => combineValues(values))
@@ -245,7 +255,15 @@ export default class XapiStats {
start: timestamp,
},
})
.then(response => response.text().then(JSON5.parse))
.then(response => response.text())
.then(data => {
try {
// starting from XAPI 23.31, the response is valid JSON
return JSON.parse(data)
} catch (_) {
return JSON5.parse(data)
}
})
.catch(err => {
delete this.#hostCache[hostUuid][step]
throw err
@@ -299,7 +317,7 @@ export default class XapiStats {
// To avoid crossing over the boundary, we ask for one less step
const optimumTimestamp = currentTimeStamp - maxDuration + step
const json = await this._getJson(xapi, host, optimumTimestamp, step)
const actualStep = json.meta.step
const actualStep = parseNumber(json.meta.step)
if (actualStep !== step) {
throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
@@ -326,9 +344,10 @@ export default class XapiStats {
return
}
if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
const endTimestamp = parseNumber(json.meta.end)
if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
stepStats = {
endTimestamp: json.meta.end,
endTimestamp,
interval: actualStep,
stats: {},
}

View File

@@ -280,7 +280,7 @@ export default class MigrateVm {
const stream = vhd.stream()
await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
}
return vhd
return { vdi, vhd }
})
)
)

View File

@@ -1,5 +1,6 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy.js'
import { basename } from 'path'
import { createLogger } from '@xen-orchestra/log'
import { format, parse } from 'xo-remote-parser'
import {
DEFAULT_ENCRYPTION_ALGORITHM,
@@ -17,17 +18,35 @@ import { Remotes } from '../models/remote.mjs'
// ===================================================================
const { warn } = createLogger('xo:mixins:remotes')
const obfuscateRemote = ({ url, ...remote }) => {
const parsedUrl = parse(url)
remote.url = format(sensitiveValues.obfuscate(parsedUrl))
return remote
}
function validatePath(url) {
const { path } = parse(url)
// these properties should be defined on the remote object itself and not as
// part of the remote URL
//
// there is a bug somewhere that keeps putting them into the URL, this list
// is here to help track it
const INVALID_URL_PARAMS = ['benchmarks', 'id', 'info', 'name', 'proxy', 'enabled', 'error', 'url']
function validateUrl(url) {
const parsedUrl = parse(url)
const { path } = parsedUrl
if (path !== undefined && basename(path) === 'xo-vm-backups') {
throw invalidParameters('remote url should not end with xo-vm-backups')
}
for (const param of INVALID_URL_PARAMS) {
if (Object.hasOwn(parsedUrl, param)) {
// log with stack trace
warn(new Error('invalid remote URL param ' + param))
}
}
}
export default class {
@@ -182,6 +201,22 @@ export default class {
if (remote === undefined) {
throw noSuchObject(id, 'remote')
}
const parsedUrl = parse(remote.url)
let fixed = false
for (const param of INVALID_URL_PARAMS) {
if (Object.hasOwn(parsedUrl, param)) {
// delete the value to trace its real origin when it's added back
// with `updateRemote()`
delete parsedUrl[param]
fixed = true
}
}
if (fixed) {
remote.url = format(parsedUrl)
this._remotes.update(remote).catch(warn)
}
return remote
}
@@ -202,7 +237,7 @@ export default class {
}
async createRemote({ name, options, proxy, url }) {
validatePath(url)
validateUrl(url)
const params = {
enabled: false,
@@ -219,6 +254,10 @@ export default class {
}
updateRemote(id, { enabled, name, options, proxy, url }) {
if (url !== undefined) {
validateUrl(url)
}
const handlers = this._handlers
const handler = handlers[id]
if (handler !== undefined) {
@@ -238,7 +277,7 @@ export default class {
@synchronized()
async _updateRemote(id, { url, ...props }) {
if (url !== undefined) {
validatePath(url)
validateUrl(url)
}
const remote = await this._getRemote(id)

View File

@@ -55,10 +55,10 @@ const normalize = set => ({
limits: set.limits
? map(set.limits, limit =>
isObject(limit)
? { ...limit, usage: limit.usage ?? 0 }
? limit
: {
available: limit,
total: limit,
usage: 0,
}
)
: {},
@@ -217,32 +217,25 @@ export default class {
if (objects) {
set.objects = objects
}
const previousLimits = set.limits
const newLimits = {}
forEach(limits, (quantity, id) => {
const previous = previousLimits[id]
if (previous !== undefined) {
newLimits[id] = {
total: quantity,
usage: previous.usage,
if (limits) {
const previousLimits = set.limits
set.limits = map(limits, (quantity, id) => {
const previous = previousLimits[id]
if (!previous) {
return {
available: quantity,
total: quantity,
}
}
} else {
newLimits[id] = {
const { available, total } = previous
return {
available: available - total + quantity,
total: quantity,
usage: 0,
}
}
})
const removedLimits = Object.keys(previousLimits).filter(key => !(key in newLimits))
removedLimits.forEach(id => {
newLimits[id] = {
usage: previousLimits[id].usage ?? 0,
}
})
set.limits = newLimits
})
}
if (ipPools) {
set.ipPools = ipPools
}
@@ -339,16 +332,15 @@ export default class {
forEach(limits, (quantity, id) => {
const limit = set.limits[id]
if (!limit) {
set.limits[id] = { usage: quantity }
return
}
if ((limit.usage += quantity) > limit.total && !force) {
if ((limit.available -= quantity) < 0 && !force) {
throw notEnoughResources([
{
resourceSet: setId,
resourceType: id,
available: limit.total - (limit.usage - quantity),
available: limit.available + quantity,
requested: quantity,
},
])
@@ -366,8 +358,8 @@ export default class {
return
}
if ((limit.usage -= quantity) < 0) {
limit.usage = 0
if ((limit.available += quantity) > limit.total) {
limit.available = limit.total
}
})
await this._save(set)
@@ -379,7 +371,7 @@ export default class {
forEach(limits, (limit, id) => {
if (VM_RESOURCES[id] || id.startsWith('ipPool:')) {
// only reset VMs related limits
limit.usage = 0
limit.available = limit.total
}
})
})
@@ -405,9 +397,7 @@ export default class {
forEach(await this.computeResourcesUsage(this._app.getObject(object.$id)), (usage, resource) => {
const limit = limits[resource]
if (limit) {
limit.usage += usage
} else {
limits[resource] = { usage }
limit.available -= usage
}
})
})
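
A sketch of the accounting model this refactor switches to: limits now track `available` instead of `usage`, with the invariant `usage === total - available`:

```js
const limit = { total: 10, available: 10 } // e.g. 10 vCPUs, none allocated

// allocate 3 vCPUs
limit.available -= 3
if (limit.available < 0) throw new Error('not enough resources') // unless `force`

// release them later, clamping so `available` never exceeds `total`
limit.available = Math.min(limit.total, limit.available + 3)
```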

View File

@@ -253,6 +253,10 @@ export default class RestApi {
const host = req.xapiObject
res.json(await host.$xapi.listMissingPatches(host))
},
async smt({ xapiObject }, res) {
res.json({ enabled: await xapiObject.$xapi.isHyperThreadingEnabled(xapiObject.$id) })
},
}
collections.pools.routes = {

View File

@@ -138,7 +138,7 @@ export class Range extends Component {
export Toggle from './toggle'
const UNITS = ['kiB', 'MiB', 'GiB']
const UNITS = ['kiB', 'MiB', 'GiB', 'TiB', 'PiB']
const DEFAULT_UNIT = 'GiB'
export class SizeInput extends BaseComponent {

View File

@@ -590,6 +590,9 @@ const messages = {
preferNbd: 'Use NBD protocol to transfer disk if available',
preferNbdInformation: 'A network accessible by XO or the proxy must have NBD enabled',
nbdConcurrency: 'Number of NBD connections per disk',
deltaComputationMode: 'Delta computation mode',
deltaComputationModeSnapshot: 'Snapshot comparison',
deltaComputationModeCbt: 'Change Block Tracking',
// ------ New Remote -----
newRemote: 'New file system remote',

View File

@@ -75,7 +75,7 @@ export const reportOnSupportPanel = async ({ files = [], formatMessage = identit
ADDITIONAL_FILES.map(({ fetch, name }) =>
timeout.call(fetch(), ADDITIONAL_FILES_FETCH_TIMEOUT).then(
file => formData.append('attachments', createBlobFromString(file), name),
error => logger.warn(`cannot get ${name}`, error)
error => logger.warn(`cannot get ${name}`, { error })
)
)
)

View File

@@ -31,10 +31,11 @@ export default class ResourceSetQuotas extends Component {
forEach(RESOURCES, resource => {
if (limits[resource] != null) {
const { total, usage } = limits[resource]
const { available, total } = limits[resource]
quotas[resource] = {
available,
total,
usage,
usage: total - available,
}
}
})
@@ -88,26 +89,22 @@ export default class ResourceSetQuotas extends Component {
<CardBlock className='text-center'>
{quota !== undefined ? (
<div>
{Number.isFinite(quota.total) ? (
<ChartistGraph
data={{
labels,
series: [quota.total - quota.usage, quota.usage],
}}
options={{
donut: true,
donutWidth: 40,
showLabel: false,
}}
type='Pie'
/>
) : (
<p className='text-xs-center display-1'>&infin;</p>
)}
<ChartistGraph
data={{
labels,
series: [quota.available, quota.usage],
}}
options={{
donut: true,
donutWidth: 40,
showLabel: false,
}}
type='Pie'
/>
<p className='text-xs-center'>
{_('resourceSetQuota', {
total: !Number.isFinite(quota.total) ? Infinity : formatSize(quota.total),
usage: validFormat ? quota.usage?.toString() : formatSize(quota.usage),
total: validFormat ? quota.total.toString() : formatSize(quota.total),
usage: validFormat ? quota.usage.toString() : formatSize(quota.usage),
})}
</p>
</div>

View File

@@ -1099,7 +1099,9 @@ export const SelectXoCloudConfig = makeSubscriptionSelect(
subscriber =>
subscribeCloudXoConfigBackups(configs => {
const xoObjects = groupBy(
map(configs, config => ({ ...config, type: 'xoConfig' })),
map(configs, config => ({ ...config, type: 'xoConfig' }))
// from newest to oldest
.sort((a, b) => b.createdAt - a.createdAt),
'xoaId'
)
subscriber({

View File

@@ -45,6 +45,7 @@ import { RemoteProxy, RemoteProxyWarning } from './_remoteProxy'
import getSettingsWithNonDefaultValue from '../_getSettingsWithNonDefaultValue'
import { canDeltaBackup, constructPattern, destructPattern, FormFeedback, FormGroup, Input, Li, Ul } from './../utils'
import Select from '../../../common/form/select'
export NewMetadataBackup from './metadata'
export NewMirrorBackup from './mirror'
@@ -635,11 +636,18 @@ const New = decorate([
nbdConcurrency,
})
},
setDeltaComputationMode({ setGlobalSettings }, deltaComputeMode) {
console.log({deltaComputeMode})
setGlobalSettings({
deltaComputeMode: deltaComputeMode.value,
})
},
},
computed: {
compressionId: generateId,
formId: generateId,
inputConcurrencyId: generateId,
inputDeltaComputationMode: generateId,
inputFullIntervalId: generateId,
inputMaxExportRate: generateId,
inputPreferNbd: generateId,
@@ -753,6 +761,7 @@ const New = decorate([
const {
checkpointSnapshot,
concurrency,
deltaComputationMode = 'AGAINST_PREVIOUS_SNAPSHOT',
fullInterval,
maxExportRate,
nbdConcurrency = 1,
@@ -1107,6 +1116,24 @@ const New = decorate([
offlineSnapshot={offlineSnapshot}
setGlobalSettings={effects.setGlobalSettings}
/>
{state.isDelta && (
<FormGroup>
<label htmlFor={state.inputDeltaComputationMode}>
<strong>{_('deltaComputationMode')}</strong>
</label>
<Select
id={state.inputDeltaComputationMode}
onChange={effects.setDeltaComputationMode}
value={deltaComputationMode}
disabled={!state.inputPreferNbd}
options={[
{ label: _('deltaComputationModeSnapshot'), value: 'AGAINST_PREVIOUS_SNAPSHOT' },
{ label: _('deltaComputationModeCbt'), value: 'CBT' },
]}
/>
</FormGroup>
)}
</div>
)}
</CardBlock>

View File

@@ -1870,21 +1870,29 @@ export default class NewVm extends BaseComponent {
{limits && (
<Row>
<Col size={3}>
{cpusLimits?.total !== undefined && (
<Limits limit={cpusLimits.total} toBeUsed={CPUs * factor} used={cpusLimits.usage} />
{cpusLimits && (
<Limits
limit={cpusLimits.total}
toBeUsed={CPUs * factor}
used={cpusLimits.total - cpusLimits.available}
/>
)}
</Col>
<Col size={3}>
{memoryLimits?.total !== undefined && (
<Limits limit={memoryLimits.total} toBeUsed={_memory * factor} used={memoryLimits.usage} />
{memoryLimits && (
<Limits
limit={memoryLimits.total}
toBeUsed={_memory * factor}
used={memoryLimits.total - memoryLimits.available}
/>
)}
</Col>
<Col size={3}>
{diskLimits?.total !== undefined && (
{diskLimits && (
<Limits
limit={diskLimits.total}
toBeUsed={(sumBy(VDIs, 'size') + sum(map(existingDisks, disk => disk.size))) * factor}
used={diskLimits.usage}
used={diskLimits.total - diskLimits.available}
/>
)}
</Col>
@@ -1915,10 +1923,10 @@ export default class NewVm extends BaseComponent {
const factor = multipleVms ? nameLabels.length : 1
return !(
CPUs * factor > get(() => resourceSet.limits.cpus.total - resourceSet.limits.cpus.usage) ||
_memory * factor > get(() => resourceSet.limits.memory.total - resourceSet.limits.memory.usage) ||
CPUs * factor > get(() => resourceSet.limits.cpus.available) ||
_memory * factor > get(() => resourceSet.limits.memory.available) ||
(sumBy(VDIs, 'size') + sum(map(existingDisks, disk => disk.size))) * factor >
get(() => resourceSet.limits.disk.total - resourceSet.limits.disk.usage)
get(() => resourceSet.limits.disk.available)
)
}
}

View File

@@ -5,10 +5,9 @@ import decorate from 'apply-decorators'
import Icon from 'icon'
import React from 'react'
import { confirm } from 'modal'
import { getApiApplianceInfo, subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
import { groupBy, sortBy } from 'lodash'
import { injectState, provideState } from 'reaclette'
import { SelectXoCloudConfig } from 'select-objects'
import { subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
import BackupXoConfigModal from './backup-xo-config-modal'
import RestoreXoConfigModal from './restore-xo-config-modal'
@@ -88,15 +87,7 @@ const CloudConfig = decorate([
},
},
computed: {
applianceId: async () => {
const { id } = await getApiApplianceInfo()
return id
},
groupedConfigs: ({ applianceId, sortedConfigs }) =>
sortBy(groupBy(sortedConfigs, 'xoaId'), config => (config[0].xoaId === applianceId ? -1 : 1)),
isConfigDefined: ({ config }) => config != null,
sortedConfigs: (_, { cloudXoConfigBackups }) =>
cloudXoConfigBackups?.sort((config, nextConfig) => config.createdAt - nextConfig.createdAt),
},
}),
injectState,

View File

@@ -33,7 +33,7 @@ const formatError = error => (typeof error === 'string' ? error : JSON.stringify
const _changeUrlElement = (value, { remote, element }) =>
editRemote(remote, {
url: format({ ...remote, [element]: value === null ? undefined : value }),
url: format({ ...parse(remote.url), [element]: value === null ? undefined : value }),
})
const _showError = remote => alert(_('remoteConnectionFailed'), <pre>{formatError(remote.error)}</pre>)
const _editRemoteName = (name, { remote }) => editRemote(remote, { name })

yarn.lock: 2448 lines changed (diff suppressed because it is too large)