Compare commits
31 Commits
restApi-cr...fix_xovmdk
| Author | SHA1 | Date |
|---|---|---|
|  | 9797e6aecf |  |
|  | aefcce45ff |  |
|  | 367fb4d8a6 |  |
|  | e54a0bfc80 |  |
|  | 9e5541703b |  |
|  | 039d5687c0 |  |
|  | b89195eb80 |  |
|  | 822cdc3fb8 |  |
|  | c7b5b715a3 |  |
|  | 56b427c09c |  |
|  | 0e45c52bbc |  |
|  | 4fd2b91fc4 |  |
|  | 7890320a7d |  |
|  | 1718649e0c |  |
|  | 7fc5d62ca9 |  |
|  | eedaca0195 |  |
|  | 9ffa52cc01 |  |
|  | e9a23755b6 |  |
|  | 5712f29a58 |  |
|  | 509ebf900e |  |
|  | 757a8915d9 |  |
|  | 35c660dbf6 |  |
|  | f23fd69e7e |  |
|  | 39c10a7197 |  |
|  | 7a1bc16468 |  |
|  | 93dd1a63da |  |
|  | b4e1064914 |  |
|  | 810cdc1a77 |  |
|  | 1023131828 |  |
|  | e2d83324ac |  |
|  | 7cea445c21 |  |
```diff
@@ -65,10 +65,11 @@ module.exports = {
       typescript: true,
       'eslint-import-resolver-custom-alias': {
         alias: {
           '@core': '../web-core/lib',
           '@': './src',
         },
         extensions: ['.ts'],
-        packages: ['@xen-orchestra/lite'],
+        packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
       },
     },
   },
```
```diff
@@ -160,10 +160,10 @@ export class ImportVmBackup {
         // update the stream with the negative vhd stream
         stream = await negativeVhd.stream()
         vdis[vdiRef].baseVdi = snapshotCandidate
-      } catch (err) {
+      } catch (error) {
         // can be a broken VHD chain, a vhd chain with a key backup, ....
         // not an irrecuperable error, don't dispose parentVhd, and fallback to full restore
-        warn(`can't use differential restore`, err)
+        warn(`can't use differential restore`, { error })
         disposableDescendants?.dispose()
       }
     }
```
```diff
@@ -191,13 +191,14 @@ export class RemoteAdapter {
   // check if we will be allowed to merge a a vhd created in this adapter
   // with the vhd at path `path`
   async isMergeableParent(packedParentUid, path) {
-    return await Disposable.use(openVhd(this.handler, path), vhd => {
+    return await Disposable.use(VhdSynthetic.fromVhdChain(this.handler, path), vhd => {
       // this baseUuid is not linked with this vhd
       if (!vhd.footer.uuid.equals(packedParentUid)) {
         return false
       }

-      const isVhdDirectory = vhd instanceof VhdDirectory
+      // check if all the chain is composed of vhd directory
+      const isVhdDirectory = vhd.checkVhdsClass(VhdDirectory)
       return isVhdDirectory
         ? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
         : !this.useVhdDirectory()
```
```diff
@@ -2,6 +2,7 @@ import { asyncEach } from '@vates/async-each'
 import { decorateMethodsWith } from '@vates/decorate-with'
 import { defer } from 'golike-defer'
 import assert from 'node:assert'
+import * as UUID from 'uuid'
 import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
 import mapValues from 'lodash/mapValues.js'

@@ -9,11 +10,48 @@ import { AbstractRemote } from './_AbstractRemote.mjs'
 import { forkDeltaExport } from './_forkDeltaExport.mjs'
 import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
 import { Task } from '../../Task.mjs'
+import { Disposable } from 'promise-toolbox'
+import { openVhd } from 'vhd-lib'
+import { getVmBackupDir } from '../../_getVmBackupDir.mjs'

 class IncrementalRemoteVmBackupRunner extends AbstractRemote {
   _getRemoteWriter() {
     return IncrementalRemoteWriter
   }
+  async _selectBaseVm(metadata) {
+    // for each disk , get the parent
+    const baseUuidToSrcVdi = new Map()
+
+    // no previous backup for a base( =key) backup
+    if (metadata.isBase) {
+      return
+    }
+    await asyncEach(Object.entries(metadata.vdis), async ([id, vdi]) => {
+      const isDifferencing = metadata.isVhdDifferencing[`${id}.vhd`]
+      if (isDifferencing) {
+        const vmDir = getVmBackupDir(metadata.vm.uuid)
+        const path = `${vmDir}/${metadata.vhds[id]}`
+        // don't catch error : we can't recover if the source vhd are missing
+        await Disposable.use(openVhd(this._sourceRemoteAdapter._handler, path), vhd => {
+          baseUuidToSrcVdi.set(UUID.stringify(vhd.header.parentUuid), vdi.$snapshot_of$uuid)
+        })
+      }
+    })
+
+    const presentBaseVdis = new Map(baseUuidToSrcVdi)
+    await this._callWriters(
+      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis),
+      'writer.checkBaseVdis()',
+      false
+    )
+    // check if the parent vdi are present in all the remotes
+    baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
+      if (!presentBaseVdis.has(baseUuid)) {
+        throw new Error(`Missing vdi ${baseUuid} which is a base for a delta`)
+      }
+    })
+    // yeah , let's go
+  }
   async _run($defer) {
     const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
     await this._callWriters(async writer => {
```
```diff
@@ -26,7 +64,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
     if (transferList.length > 0) {
       for (const metadata of transferList) {
         assert.strictEqual(metadata.mode, 'delta')

+        await this._selectBaseVm(metadata)
         await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
         const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
           useChain: false,
@@ -50,6 +88,17 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
           }),
           'writer.transfer()'
         )
+        // this will update parent name with the needed alias
+        await this._callWriters(
+          writer =>
+            writer.updateUuidAndChain({
+              isVhdDifferencing,
+              timestamp: metadata.timestamp,
+              vdis: incrementalExport.vdis,
+            }),
+          'writer.updateUuidAndChain()'
+        )

         await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
         // for healthcheck
         this._tags = metadata.vm.tags
```
```diff
@@ -78,6 +78,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
       'writer.transfer()'
     )

+    // we want to control the uuid of the vhd in the chain
+    // and ensure they are correctly chained
+    await this._callWriters(
+      writer =>
+        writer.updateUuidAndChain({
+          isVhdDifferencing,
+          timestamp,
+          vdis: deltaExport.vdis,
+        }),
+      'writer.updateUuidAndChain()'
+    )
+
     this._baseVm = exportedVm

     if (baseVm !== undefined) {
```
```diff
@@ -133,7 +145,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
     ])
     const srcVdi = srcVdis[snapshotOf]
     if (srcVdi !== undefined) {
-      baseUuidToSrcVdi.set(baseUuid, srcVdi)
+      baseUuidToSrcVdi.set(baseUuid, srcVdi.uuid)
     } else {
       debug('ignore snapshot VDI because no longer present on VM', {
         vdi: baseUuid,
@@ -154,18 +166,18 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
     }

     const fullVdisRequired = new Set()
-    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
+    baseUuidToSrcVdi.forEach((srcVdiUuid, baseUuid) => {
       if (presentBaseVdis.has(baseUuid)) {
         debug('found base VDI', {
           base: baseUuid,
-          vdi: srcVdi.uuid,
+          vdi: srcVdiUuid,
         })
       } else {
         debug('missing base VDI', {
           base: baseUuid,
-          vdi: srcVdi.uuid,
+          vdi: srcVdiUuid,
         })
-        fullVdisRequired.add(srcVdi.uuid)
+        fullVdisRequired.add(srcVdiUuid)
       }
     })
```
```diff
@@ -1,17 +1,15 @@
 import assert from 'node:assert'
 import mapValues from 'lodash/mapValues.js'
-import ignoreErrors from 'promise-toolbox/ignoreErrors'
 import { asyncEach } from '@vates/async-each'
 import { asyncMap } from '@xen-orchestra/async-map'
-import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
+import { chainVhd, openVhd } from 'vhd-lib'
 import { createLogger } from '@xen-orchestra/log'
 import { decorateClass } from '@vates/decorate-with'
 import { defer } from 'golike-defer'
-import { dirname } from 'node:path'
+import { dirname, basename } from 'node:path'

 import { formatFilenameDate } from '../../_filenameDate.mjs'
 import { getOldEntries } from '../../_getOldEntries.mjs'
-import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
 import { Task } from '../../Task.mjs'

 import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
```
```diff
@@ -23,42 +21,45 @@ import { Disposable } from 'promise-toolbox'
 const { warn } = createLogger('xo:backups:DeltaBackupWriter')

 export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
+  #parentVdiPaths
   #vhds
   async checkBaseVdis(baseUuidToSrcVdi) {
+    this.#parentVdiPaths = {}
-    const { handler } = this._adapter
+    const adapter = this._adapter

     const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`

-    await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
-      let found = false
+    await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdiUuid]) => {
+      let parentDestPath
+      const vhdDir = `${vdisDir}/${srcVdiUuid}`
       try {
-        const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
+        const vhds = await handler.list(vhdDir, {
           filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
           ignoreMissing: true,
           prependDir: true,
         })
         const packedBaseUuid = packUuid(baseUuid)
-        await asyncMap(vhds, async path => {
-          try {
-            await checkVhdChain(handler, path)
-            // Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
-            //
-            // since all the checks of a path are done in parallel, found would be containing
-            // only the last answer of isMergeableParent which is probably not the right one
-            // this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300
-            // the last one is probably the right one
-            const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
-            found = found || isMergeable
+        for (let i = vhds.length - 1; i >= 0 && parentDestPath === undefined; i--) {
+          const path = vhds[i]
+          try {
+            if (await adapter.isMergeableParent(packedBaseUuid, path)) {
+              parentDestPath = path
+            }
           } catch (error) {
             warn('checkBaseVdis', { error })
-            await ignoreErrors.call(VhdAbstract.unlink(handler, path))
           }
-        })
+        }
       } catch (error) {
         warn('checkBaseVdis', { error })
       }
-      if (!found) {
+      // no usable parent => the runner will have to decide to fall back to a full or stop backup
+      if (parentDestPath === undefined) {
         baseUuidToSrcVdi.delete(baseUuid)
+      } else {
+        this.#parentVdiPaths[vhdDir] = parentDestPath
       }
     })
   }
```
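The warning comment removed above documented a real pitfall: accumulating the results of parallel asynchronous checks through `found = found || await check()` silently loses earlier answers, because `found` is read before each `await` and written after it. A minimal standalone sketch of the difference (the `isMergeableParent` callback here is a hypothetical stand-in, not the real adapter API):

```js
// Broken accumulation, as warned against in the old comment: each parallel
// branch captures `found` before its await and overwrites it afterwards,
// so a branch resolving to `false` can clobber an earlier `true`.
async function anyMergeable(paths, isMergeableParent) {
  let found = false
  await Promise.all(
    paths.map(async path => {
      found = found || (await isMergeableParent(path))
    })
  )
  return found
}

// Pattern adopted by the new code: walk candidates sequentially from the
// most recent one and keep the first usable parent — deterministic, and it
// also records *which* path matched instead of a bare boolean.
async function findMergeableParent(paths, isMergeableParent) {
  for (let i = paths.length - 1; i >= 0; i--) {
    if (await isMergeableParent(paths[i])) {
      return paths[i]
    }
  }
}
```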
```diff
@@ -123,6 +124,44 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
     }
   }

+  async updateUuidAndChain({ isVhdDifferencing, vdis }) {
+    assert.notStrictEqual(
+      this.#vhds,
+      undefined,
+      '_transfer must be called before updateUuidAndChain for incremental backups'
+    )
+
+    const parentVdiPaths = this.#parentVdiPaths
+    const { handler } = this._adapter
+    const vhds = this.#vhds
+    await asyncEach(Object.entries(vdis), async ([id, vdi]) => {
+      const isDifferencing = isVhdDifferencing[`${id}.vhd`]
+      const path = `${this._vmBackupDir}/${vhds[id]}`
+      if (isDifferencing) {
+        assert.notStrictEqual(
+          parentVdiPaths,
+          'checkbasevdi must be called before updateUuidAndChain for incremental backups'
+        )
+        const parentPath = parentVdiPaths[dirname(path)]
+        // we are in a incremental backup
+        // we already computed the chain in checkBaseVdis
+        assert.notStrictEqual(parentPath, undefined, 'A differential VHD must have a parent')
+        // forbid any kind of loop
+        assert.ok(basename(parentPath) < basename(path), `vhd must be sorted to be chained`)
+        await chainVhd(handler, parentPath, handler, path)
+      }
+
+      // set the correct UUID in the VHD if needed
+      await Disposable.use(openVhd(handler, path), async vhd => {
+        if (!vhd.footer.uuid.equals(packUuid(vdi.uuid))) {
+          vhd.footer.uuid = packUuid(vdi.uuid)
+          await vhd.readBlockAllocationTable() // required by writeFooter()
+          await vhd.writeFooter()
+        }
+      })
+    })
+  }
+
   async _deleteOldEntries() {
     const adapter = this._adapter
     const oldEntries = this._oldEntries
```
```diff
@@ -141,14 +180,10 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
     const jobId = job.id
     const handler = adapter.handler

-    let metadataContent = await this._isAlreadyTransferred(timestamp)
-    if (metadataContent !== undefined) {
-      // @todo : should skip backup while being vigilant to not stuck the forked stream
-      Task.info('This backup has already been transfered')
-    }

     const basename = formatFilenameDate(timestamp)
-    const vhds = mapValues(
+    // update this.#vhds before eventually skipping transfer, so that
+    // updateUuidAndChain has all the mandatory data
+    const vhds = (this.#vhds = mapValues(
       deltaExport.vdis,
       vdi =>
         `vdis/${jobId}/${
@@ -158,7 +193,15 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
             vdi.uuid
             : vdi.$snapshot_of$uuid
         }/${adapter.getVhdFileName(basename)}`
-    )
+    ))

+    let metadataContent = await this._isAlreadyTransferred(timestamp)
+    if (metadataContent !== undefined) {
+      // skip backup while being vigilant to not stuck the forked stream
+      Task.info('This backup has already been transfered')
+      Object.values(deltaExport.streams).forEach(stream => stream.destroy())
+      return { size: 0 }
+    }

     metadataContent = {
       isVhdDifferencing,
```
```diff
@@ -174,38 +217,13 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
       vm,
       vmSnapshot,
     }

     const { size } = await Task.run({ name: 'transfer' }, async () => {
       let transferSize = 0
       await asyncEach(
-        Object.entries(deltaExport.vdis),
-        async ([id, vdi]) => {
+        Object.keys(deltaExport.vdis),
+        async id => {
           const path = `${this._vmBackupDir}/${vhds[id]}`

-          const isDifferencing = isVhdDifferencing[`${id}.vhd`]
-          let parentPath
-          if (isDifferencing) {
-            const vdiDir = dirname(path)
-            parentPath = (
-              await handler.list(vdiDir, {
-                filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
-                prependDir: true,
-              })
-            )
-              .sort()
-              .pop()
-
-            assert.notStrictEqual(
-              parentPath,
-              undefined,
-              `missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
-            )
-
-            parentPath = parentPath.slice(1) // remove leading slash
-
-            // TODO remove when this has been done before the export
-            await checkVhd(handler, parentPath)
-          }

           // don't write it as transferSize += await async function
           // since i += await asyncFun lead to race condition
           // as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
@@ -217,17 +235,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
             writeBlockConcurrency: this._config.writeBlockConcurrency,
           })
           transferSize += transferSizeOneDisk

-          if (isDifferencing) {
-            await chainVhd(handler, parentPath, handler, path)
-          }

-          // set the correct UUID in the VHD
-          await Disposable.use(openVhd(handler, path), async vhd => {
-            vhd.footer.uuid = packUuid(vdi.uuid)
-            await vhd.readBlockAllocationTable() // required by writeFooter()
-            await vhd.writeFooter()
-          })
         },
         {
           concurrency: settings.diskPerVmConcurrency,
```
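The `transferSize` comment kept above points at the `require-atomic-updates` class of bugs. A small hypothetical illustration of why `total += await f()` is unsafe when several disks transfer concurrently:

```js
let total = 0

// BUG: `total += await getSize()` desugars to
// `const old = total; total = old + await getSize()`, so concurrent calls
// all read the same stale value of `total` before their awaits resolve.
async function addChunk(getSize) {
  total += await getSize()
}

// Safe form, as used in the transfer loop above: await first, then update,
// so the read and the write of `total` happen without an await in between.
async function addChunkSafely(getSize) {
  const size = await getSize()
  total += size
}
```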
```diff
@@ -1,3 +1,4 @@
+import assert from 'node:assert'
 import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
 import ignoreErrors from 'promise-toolbox/ignoreErrors'
 import { formatDateTime } from '@xen-orchestra/xapi'
@@ -14,6 +15,7 @@ import find from 'lodash/find.js'

 export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
   async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
+    assert.notStrictEqual(baseVm, undefined)
     const sr = this._sr
     const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
       vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
@@ -36,7 +38,9 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
     }
   }

+  updateUuidAndChain() {
+    // nothing to do, the chaining is not modified in this case
+  }
   prepare({ isFull }) {
     // create the task related to this export and ensure all methods are called in this context
     const task = new Task({
```
```diff
@@ -5,6 +5,10 @@ export class AbstractIncrementalWriter extends AbstractWriter {
     throw new Error('Not implemented')
   }

+  updateUuidAndChain() {
+    throw new Error('Not implemented')
+  }
+
   cleanup() {
     throw new Error('Not implemented')
   }
```
```diff
@@ -113,13 +113,13 @@ export const MixinRemoteWriter = (BaseClass = Object) =>
       )
     }

-    _isAlreadyTransferred(timestamp) {
+    async _isAlreadyTransferred(timestamp) {
       const vmUuid = this._vmUuid
       const adapter = this._adapter
       const backupDir = getVmBackupDir(vmUuid)
       try {
         const actualMetadata = JSON.parse(
-          adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
+          await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
         )
         return actualMetadata
       } catch (error) {}
```
```diff
@@ -230,6 +230,7 @@ Settings are described in [@xen-orchestra/backups/_runners/VmsXapi.mjs](http
   - `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
   - `prepare({ isFull })`
   - `transfer({ timestamp, deltaExport, sizeContainers })`
+  - `updateUuidAndChain({ isVhdDifferencing, vdis })`
   - `cleanup()`
   - `healthCheck()` // is not executed if no health check sr or tag doesn't match
 - **Full**
```
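With `updateUuidAndChain` inserted into that list, the order in which a runner drives an incremental writer is easier to see end to end. A simplified sketch — the argument plumbing here is hypothetical, but the method names and their order come from the runner changes in this diff:

```js
async function runIncrementalBackup(
  writer,
  { baseUuidToSrcVdi, baseVm, deltaExport, isVhdDifferencing, metadata, sizeContainers }
) {
  await writer.checkBaseVdis(baseUuidToSrcVdi, baseVm) // prune unusable bases, remember parent paths
  await writer.prepare({ isFull: metadata.isBase })
  await writer.transfer({ timestamp: metadata.timestamp, deltaExport, sizeContainers })
  // new step introduced by this PR: fix VHD footer UUIDs and chain each
  // differencing disk to the parent selected during checkBaseVdis
  await writer.updateUuidAndChain({ isVhdDifferencing, vdis: deltaExport.vdis })
  await writer.cleanup()
  await writer.healthCheck()
}
```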
```diff
@@ -20,5 +20,7 @@ export function split(path) {
   return parts
 }

-export const relativeFromFile = (file, path) => relative(dirname(file), path)
+// paths are made absolute otherwise fs.relative() would resolve them against working directory
+export const relativeFromFile = (file, path) => relative(dirname(normalize(file)), normalize(path))

 export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
```
@xen-orchestra/fs/src/path.test.js (new file, 17 lines)
```diff
@@ -0,0 +1,17 @@
+import { describe, it } from 'test'
+import { strict as assert } from 'assert'
+
+import { relativeFromFile } from './path.js'
+
+describe('relativeFromFile()', function () {
+  for (const [title, args] of Object.entries({
+    'file absolute and path absolute': ['/foo/bar/file.vhd', '/foo/baz/path.vhd'],
+    'file relative and path absolute': ['foo/bar/file.vhd', '/foo/baz/path.vhd'],
+    'file absolute and path relative': ['/foo/bar/file.vhd', 'foo/baz/path.vhd'],
+    'file relative and path relative': ['foo/bar/file.vhd', 'foo/baz/path.vhd'],
+  })) {
+    it('works with ' + title, function () {
+      assert.equal(relativeFromFile(...args), '../baz/path.vhd')
+    })
+  }
+})
```
```diff
@@ -54,10 +54,10 @@ async function handleExistingFile(root, indexPath, path) {
         await indexFile(fullPath, indexPath)
       }
     }
-  } catch (err) {
-    if (err.code !== 'EEXIST') {
+  } catch (error) {
+    if (error.code !== 'EEXIST') {
       // there can be a symbolic link in the tree
-      warn('handleExistingFile', err)
+      warn('handleExistingFile', { error })
     }
   }
 }
```
```diff
@@ -106,7 +106,7 @@ export async function watchRemote(remoteId, { root, immutabilityDuration, rebuil
     await File.liftImmutability(settingPath)
   } catch (error) {
     // file may not exists, and it's not really a problem
-    info('lifting immutability on current settings', error)
+    info('lifting immutability on current settings', { error })
   }
   await fs.writeFile(
     settingPath,
```
```diff
@@ -50,7 +50,17 @@ const RRD_POINTS_PER_STEP: { [key in RRD_STEP]: number } = {
 // Utils
 // -------------------------------------------------------------------

-function convertNanToNull(value: number) {
+function parseNumber(value: number | string) {
+  // Starting from XAPI 23.31, numbers in the JSON payload are encoded as
+  // strings to support NaN, Infinity and -Infinity
+  if (typeof value === 'string') {
+    const asNumber = +value
+    if (isNaN(asNumber) && value !== 'NaN') {
+      throw new Error('cannot parse number: ' + value)
+    }
+    value = asNumber
+  }
+
   return isNaN(value) ? null : value
 }

@@ -59,7 +69,7 @@ function convertNanToNull(value: number) {
 // -------------------------------------------------------------------

 const computeValues = (dataRow: any, legendIndex: number, transformValue = identity) =>
-  map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
+  map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))

 const createGetProperty = (obj: object, property: string, defaultValue: unknown) =>
   defaults(obj, { [property]: defaultValue })[property] as any
```
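For reference, here is the observable behaviour of the new helper on the encodings XAPI may emit (a JS restatement of the function above — the same implementation also lands in xo-server later in this diff):

```js
function parseNumber(value) {
  if (typeof value === 'string') {
    const asNumber = +value
    if (isNaN(asNumber) && value !== 'NaN') {
      throw new Error('cannot parse number: ' + value)
    }
    value = asNumber
  }
  return isNaN(value) ? null : value
}

parseNumber(12.5)       // 12.5
parseNumber('12.5')     // 12.5 — string encoding used since XAPI 23.31
parseNumber(NaN)        // null
parseNumber('NaN')      // null — NaN still maps to null for charting
parseNumber('Infinity') // Infinity
// parseNumber('foo')   // throws: cannot parse number: foo
```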
```diff
@@ -319,8 +329,14 @@ export default class XapiStats {
       },
       abortSignal,
     })
-    // eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
-    return JSON5.parse(await resp.text())
+    const text = await resp.text()
+    try {
+      // starting from XAPI 23.31, the response is valid JSON
+      return JSON.parse(text)
+    } catch (error) {
+      // eslint-disable-next-line import/no-named-as-default-member -- https://github.com/json5/json5/issues/287
+      return JSON5.parse(text)
+    }
   }

   // To avoid multiple requests, we keep a cache for the stats and
```
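The try/catch above exists because pre-23.31 XAPI emits bare `NaN`/`Infinity` tokens in the RRD payload, which are valid JSON5 but not JSON. A quick illustration of the two cases the parser now handles:

```js
import JSON5 from 'json5'

const modern = '{ "value": "NaN" }' // XAPI 23.31+: plain JSON, numbers encoded as strings
const legacy = '{ "value": NaN }'   // older XAPI: bare NaN token, invalid JSON

JSON.parse(modern)  // ok — the fast native path is tried first
JSON5.parse(legacy) // ok — JSON5 accepts NaN, Infinity and -Infinity
try {
  JSON.parse(legacy)
} catch (error) {
  // SyntaxError — exactly the case the JSON5 fallback covers
}
```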
```diff
@@ -383,7 +399,10 @@ export default class XapiStats {
       abortSignal,
     })

-    const actualStep = json.meta.step as number
+    const actualStep = parseNumber(json.meta.step)
+    if (actualStep !== step) {
+      throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
+    }

     if (json.data.length > 0) {
       // fetched data is organized from the newest to the oldest
```
```diff
@@ -407,14 +426,15 @@ export default class XapiStats {

       let stepStats = xoObjectStats[actualStep]
       let cacheStepStats = cacheXoObjectStats[actualStep]
-      if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
+      const endTimestamp = parseNumber(json.meta.end)
+      if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
         stepStats = xoObjectStats[actualStep] = {
-          endTimestamp: json.meta.end,
+          endTimestamp,
           interval: actualStep,
           canBeExpired: false,
         }
         cacheStepStats = cacheXoObjectStats[actualStep] = {
-          endTimestamp: json.meta.end,
+          endTimestamp,
           interval: actualStep,
           canBeExpired: true,
         }
@@ -438,10 +458,6 @@ export default class XapiStats {
         })
       })
     }

-    if (actualStep !== step) {
-      throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
-    }
   } catch (error) {
     if (error instanceof Error && error.name === 'AbortError') {
       return
```
```diff
@@ -1,13 +1,15 @@
 {
   "extends": "@vue/tsconfig/tsconfig.dom.json",
-  "include": ["env.d.ts", "src/**/*", "src/**/*.vue"],
+  "include": ["env.d.ts", "src/**/*", "src/**/*.vue", "../web-core/lib/**/*", "../web-core/lib/**/*.vue"],
   "exclude": ["src/**/__tests__/*"],
   "compilerOptions": {
     "composite": true,
     "noEmit": true,
     "baseUrl": ".",
+    "rootDir": "..",
     "paths": {
-      "@/*": ["./src/*"]
+      "@/*": ["./src/*"],
+      "@core/*": ["../web-core/lib/*"]
     }
   }
 }
```
```diff
@@ -23,6 +23,7 @@ export default defineConfig({
   resolve: {
     alias: {
       '@': fileURLToPath(new URL('./src', import.meta.url)),
+      '@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
     },
   },
```
````diff
@@ -27,6 +27,16 @@ log.error('could not join server', {
 })
 ```
+
+A logging method has the following signature:
+
+```ts
+interface LoggingMethod {
+  (error): void
+
+  (message: string, data?: { error?: Error; [property: string]: any }): void
+}
+```

 ### Consumer

 Then, at application level, configure the logs are handled:
@@ -45,6 +45,16 @@ log.error('could not join server', {
 })
 ```
+
+A logging method has the following signature:
+
+```ts
+interface LoggingMethod {
+  (error): void
+
+  (message: string, data?: { error?: Error; [property: string]: any }): void
+}
+```

 ### Consumer

 Then, at application level, configure the logs are handled:
````
```diff
@@ -10,7 +10,8 @@
     }
   },
   "devDependencies": {
-    "vue": "^3.4.13"
+    "vue": "^3.4.13",
+    "@vue/tsconfig": "^0.5.1"
   },
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/web-core",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
@@ -25,6 +26,6 @@
   },
   "license": "AGPL-3.0-or-later",
   "engines": {
-    "node": ">=8.10"
+    "node": ">=18"
   }
 }
```
@xen-orchestra/web-core/tsconfig.json (new file, 12 lines)
```diff
@@ -0,0 +1,12 @@
+{
+  "extends": "@vue/tsconfig/tsconfig.dom.json",
+  "include": ["env.d.ts", "lib/**/*", "lib/**/*.vue"],
+  "exclude": ["lib/**/__tests__/*"],
+  "compilerOptions": {
+    "noEmit": true,
+    "baseUrl": ".",
+    "paths": {
+      "@core/*": ["./lib/*"]
+    }
+  }
+}
```
```diff
@@ -1,13 +1,22 @@
 {
   "extends": "@vue/tsconfig/tsconfig.dom.json",
-  "include": ["env.d.ts", "typed-router.d.ts", "src/**/*", "src/**/*.vue"],
+  "include": [
+    "env.d.ts",
+    "typed-router.d.ts",
+    "src/**/*",
+    "src/**/*.vue",
+    "../web-core/lib/**/*",
+    "../web-core/lib/**/*.vue"
+  ],
   "exclude": ["src/**/__tests__/*"],
   "compilerOptions": {
     "composite": true,
     "noEmit": true,
     "baseUrl": ".",
+    "rootDir": "..",
     "paths": {
-      "@/*": ["./src/*"]
+      "@/*": ["./src/*"],
+      "@core/*": ["../web-core/lib/*"]
     }
   }
 }
```
```diff
@@ -11,6 +11,7 @@ export default defineConfig({
   resolve: {
     alias: {
       '@': fileURLToPath(new URL('./src', import.meta.url)),
+      '@core': fileURLToPath(new URL('../web-core/lib', import.meta.url)),
     },
   },
 })
```
```diff
@@ -21,12 +21,23 @@ export default class Vif {
       MAC = '',
     } = {}
   ) {
+    if (device === undefined) {
+      const allowedDevices = await this.call('VM.get_allowed_VIF_devices', VM)
+      if (allowedDevices.length === 0) {
+        const error = new Error('could not find an allowed VIF device')
+        error.poolUuid = this.pool.uuid
+        error.vmRef = VM
+        throw error
+      }
+
+      device = allowedDevices[0]
+    }

     const [powerState, ...rest] = await Promise.all([
       this.getField('VM', VM, 'power_state'),
-      device ?? (await this.call('VM.get_allowed_VIF_devices', VM))[0],
-      MTU ?? (await this.getField('network', network, 'MTU')),
+      MTU ?? this.getField('network', network, 'MTU'),
     ])
-    ;[device, MTU] = rest
+    ;[MTU] = rest

     const vifRef = await this.call('VIF.create', {
       currently_attached: powerState === 'Suspended' ? currently_attached : undefined,
```
```diff
@@ -8,6 +8,11 @@
 > Users must be able to say: “Nice enhancement, I'm eager to test it”

 - Disable search engine indexing via a `robots.txt`
+- [Stats] Support format used by XAPI 23.31
+- [REST API] Export host [SMT](https://en.wikipedia.org/wiki/Simultaneous_multithreading) status at `/hosts/:id/smt` [Forum#71374](https://xcp-ng.org/forum/post/71374)
+- [Home & REST API] `$container` field of an halted VM now points to a host if a VDI is on a local storage [Forum#71769](https://xcp-ng.org/forum/post/71769)
+- [Size Input] Ability to select two new units in the dropdown (`TiB`, `PiB`) (PR [#7382](https://github.com/vatesfr/xen-orchestra/pull/7382))

 ### Bug fixes

@@ -15,6 +20,9 @@

 - [Settings/XO Config] Sort backups from newest to oldest
 - [Plugins/audit] Don't log `tag.getAllConfigured` calls
+- [Remotes] Correctly clear error when the remote is tested with success
+- [Import/VMWare] Fix importing last snapshot (PR [#7370](https://github.com/vatesfr/xen-orchestra/pull/7370))
+- [Host/Reboot] Fix false positive warning when restarting an host after updates (PR [#7366](https://github.com/vatesfr/xen-orchestra/pull/7366))

 ### Packages to release

@@ -32,8 +40,12 @@

 <!--packages-start-->

-- xo-server patch
+- @xen-orchestra/backups patch
+- @xen-orchestra/fs patch
+- @xen-orchestra/xapi patch
+- vhd-lib patch
+- xo-server minor
 - xo-server-audit patch
-- xo-web patch
+- xo-web minor

 <!--packages-end-->
```
docs/xoa.md (15 lines added)
````diff
@@ -93,6 +93,21 @@ Follow the instructions:

 You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.

+If you want to use static IP address for your appliance:
+
+```sh
+xe vm-param-set uuid="$uuid" \
+  xenstore-data:vm-data/ip="$ip" \
+  xenstore-data:vm-data/netmask="$netmask" \
+  xenstore-data:vm-data/gateway="$gateway"
+```
+
+If you want to replace the default DNS server:
+
+```sh
+xe vm-param-set uuid="$uuid" xenstore-data:vm-data/dns="$dns"
+```
+
 After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.

 ## First console connection
````
```diff
@@ -1,4 +1,6 @@
 {
+  "name": "xen-orchestra",
+  "version": "0.0.0",
   "devDependencies": {
     "@babel/core": "^7.0.0",
     "@babel/eslint-parser": "^7.13.8",
@@ -94,7 +96,7 @@
   },
   "private": true,
   "scripts": {
-    "build": "turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
+    "build": "TURBO_TELEMETRY_DISABLED=1 turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
     "build:xo-lite": "turbo run build --scope @xen-orchestra/lite",
     "clean": "scripts/run-script.js --parallel clean",
     "dev": "scripts/run-script.js --parallel --concurrency 0 --verbose dev",
```
```diff
@@ -46,9 +46,9 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
   }

   get compressionType() {
-    const compressionType = this.vhds[0].compressionType
-    for (let i = 0; i < this.vhds.length; i++) {
-      if (compressionType !== this.vhds[i].compressionType) {
+    const compressionType = this.#vhds[0].compressionType
+    for (let i = 0; i < this.#vhds.length; i++) {
+      if (compressionType !== this.#vhds[i].compressionType) {
         return 'MIXED'
       }
     }
```
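The getter's contract, restated standalone (illustrative plain objects, not the real VhdSynthetic internals): a chain only reports a usable compression type when every member agrees, otherwise `'MIXED'` — which can never equal an adapter's own compression type, so `RemoteAdapter.isMergeableParent()` refuses to merge heterogeneous chains.

```js
// Minimal sketch of the getter's behaviour.
function compressionTypeOf(vhds) {
  const type = vhds[0].compressionType
  return vhds.every(vhd => vhd.compressionType === type) ? type : 'MIXED'
}

compressionTypeOf([{ compressionType: 'gzip' }, { compressionType: 'gzip' }]) // 'gzip'
compressionTypeOf([{ compressionType: 'gzip' }, { compressionType: 'none' }]) // 'MIXED'
```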
```diff
@@ -1,6 +1,6 @@
 'use strict'

-const { dirname, relative } = require('path')
+const { relativeFromFile } = require('@xen-orchestra/fs/path')

 const { openVhd } = require('./openVhd')
 const { DISK_TYPES } = require('./_constants')
@@ -21,7 +21,7 @@ module.exports = async function chain(parentHandler, parentPath, childHandler, c
   }
   await childVhd.readBlockAllocationTable()

-  const parentName = relative(dirname(childPath), parentPath)
+  const parentName = relativeFromFile(childPath, parentPath)
   header.parentUuid = parentVhd.footer.uuid
   header.parentUnicodeName = parentName
   await childVhd.setUniqueParentLocator(parentName)
```
```diff
@@ -27,7 +27,7 @@ async function sendToNagios(app, jobName, vmBackupInfo) {
       jobName
     )
   } catch (error) {
-    warn('sendToNagios:', error)
+    warn('sendToNagios:', { error })
   }
 }
```
```diff
@@ -1,3 +1,4 @@
+import semver from 'semver'
 import { createLogger } from '@xen-orchestra/log'
 import assert from 'assert'
 import { format } from 'json-rpc-peer'
@@ -136,13 +137,38 @@ export async function restart({
   const pool = this.getObject(host.$poolId, 'pool')
   const master = this.getObject(pool.master, 'host')
   const hostRebootRequired = host.rebootRequired
-  if (hostRebootRequired && host.id !== master.id && host.version === master.version) {
-    throw incorrectState({
-      actual: hostRebootRequired,
-      expected: false,
-      object: master.id,
-      property: 'rebootRequired',
-    })

+  // we are currently in an host upgrade process
+  if (hostRebootRequired && host.id !== master.id) {
+    // this error is not ideal but it means that the pool master must be fully upgraded/rebooted before the current host can be rebooted.
+    //
+    // there is a single error for the 3 cases because the client must handle them the same way
+    const throwError = () =>
+      incorrectState({
+        actual: hostRebootRequired,
+        expected: false,
+        object: master.id,
+        property: 'rebootRequired',
+      })
+
+    if (semver.lt(master.version, host.version)) {
+      log.error(`master version (${master.version}) is older than the host version (${host.version})`, {
+        masterId: master.id,
+        hostId: host.id,
+      })
+      throwError()
+    }
+
+    if (semver.eq(master.version, host.version)) {
+      if ((await this.getXapi(host).listMissingPatches(master._xapiId)).length > 0) {
+        log.error('master has missing patches', { masterId: master.id })
+        throwError()
+      }
+      if (master.rebootRequired) {
+        log.error('master needs to reboot')
+        throwError()
+      }
+    }
+  }
 }
```
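The rework above replaces a single version-equality test with three explicit upgrade-in-progress cases, compared via `semver`. Illustrative comparisons (the version numbers are hypothetical):

```js
import semver from 'semver'

// master older than the host to reboot: block until the master is upgraded
semver.lt('8.2.0', '8.3.0') // true

// same version: block only if the master still misses patches or itself
// needs a reboot (checked via listMissingPatches() and rebootRequired)
semver.eq('8.3.0', '8.3.0') // true

// master already newer: the host reboot proceeds
semver.lt('8.3.0', '8.2.0') // false
```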
packages/xo-server/src/api/pusb.mjs (new file, 27 lines)
```diff
@@ -0,0 +1,27 @@
+export async function scan({ host }) {
+  await this.getXapi(host).call('PUSB.scan', host._xapiRef)
+}
+
+scan.params = {
+  host: { type: 'string' },
+}
+scan.resolve = {
+  host: ['host', 'host', 'operate'],
+}
+
+export async function set({ pusb, enabled }) {
+  const xapi = this.getXapi(pusb)
+
+  if (enabled !== undefined && enabled !== pusb.passthroughEnabled) {
+    await xapi.call('PUSB.set_passthrough_enabled', pusb._xapiRef, enabled)
+  }
+}
+
+set.params = {
+  id: { type: 'string' },
+  enabled: { type: 'boolean', optional: true },
+}
+
+set.resolve = {
+  pusb: ['id', 'PUSB', 'administrate'],
+}
```
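A hypothetical client-side sketch of these new API methods — `api` stands for any JSON-RPC client connected to xo-server and the ids are placeholders; only the method and parameter names come from the file above:

```js
// rescan physical USB devices on a host
await api.call('pusb.scan', { host: hostId })

// enable passthrough; `set` only issues the XAPI call when the requested
// state differs from pusb.passthroughEnabled, so repeating it is a no-op
await api.call('pusb.set', { id: pusbId, enabled: true })
```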
```diff
@@ -328,6 +328,34 @@ const TRANSFORMS = {

     const { creation } = xoData.extract(obj) ?? {}

+    let $container
+    if (obj.resident_on !== 'OpaqueRef:NULL') {
+      // resident_on is set when the VM is running (or paused or suspended on a host)
+      $container = link(obj, 'resident_on')
+    } else {
+      // if the VM is halted, the $container is the pool
+      $container = link(obj, 'pool')
+
+      // unless one of its VDI is on a non shared SR
+      //
+      // linked objects may not be there when this code run, and it will only be
+      // refreshed when the VM XAPI record change, this value is not guaranteed
+      // to be up-to-date, but it practice it appears to work fine thanks to
+      // `VBDs` and `current_operations` changing when a VDI is
+      // added/removed/migrated
+      for (const vbd of obj.$VBDs) {
+        const sr = vbd?.$VDI?.$SR
+        if (sr !== undefined && !sr.shared) {
+          const pbd = sr.$PBDs[0]
+          const hostId = pbd && link(pbd, 'host')
+          if (hostId !== undefined) {
+            $container = hostId
+            break
+          }
+        }
+      }
+    }
+
     const vm = {
       // type is redefined after for controllers/, templates &
       // snapshots.
@@ -422,8 +450,7 @@ const TRANSFORMS = {
       xenTools,
       ...getVmGuestToolsProps(obj),

-      // TODO: handle local VMs (`VM.get_possible_hosts()`).
-      $container: isRunning ? link(obj, 'resident_on') : link(obj, 'pool'),
+      $container,
       $VBDs: link(obj, 'VBDs'),

       // TODO: dedupe
@@ -862,6 +889,17 @@ const TRANSFORMS = {
       vm: link(obj, 'VM'),
     }
   },
+
+  pusb(obj) {
+    return {
+      type: 'PUSB',
+
+      description: obj.description,
+      host: link(obj, 'host'),
+      passthroughEnabled: obj.passthrough_enabled,
+      usbGroup: link(obj, 'USB_group'),
+    }
+  },
 }

 // ===================================================================
```
```diff
@@ -45,7 +45,17 @@ const RRD_POINTS_PER_STEP = {
 // Utils
 // -------------------------------------------------------------------

-function convertNanToNull(value) {
+function parseNumber(value) {
+  // Starting from XAPI 23.31, numbers in the JSON payload are encoded as
+  // strings to support NaN, Infinity and -Infinity
+  if (typeof value === 'string') {
+    const asNumber = +value
+    if (isNaN(asNumber) && value !== 'NaN') {
+      throw new Error('cannot parse number: ' + value)
+    }
+    value = asNumber
+  }
+
   return isNaN(value) ? null : value
 }

@@ -58,7 +68,7 @@ async function getServerTimestamp(xapi, hostRef) {
 // -------------------------------------------------------------------

 const computeValues = (dataRow, legendIndex, transformValue = identity) =>
-  map(dataRow, ({ values }) => transformValue(convertNanToNull(values[legendIndex])))
+  map(dataRow, ({ values }) => transformValue(parseNumber(values[legendIndex])))

 const combineStats = (stats, path, combineValues) => zipWith(...map(stats, path), (...values) => combineValues(values))

@@ -245,7 +255,15 @@ export default class XapiStats {
           start: timestamp,
         },
       })
-      .then(response => response.text().then(JSON5.parse))
+      .then(response => response.text())
+      .then(data => {
+        try {
+          // starting from XAPI 23.31, the response is valid JSON
+          return JSON.parse(data)
+        } catch (_) {
+          return JSON5.parse(data)
+        }
+      })
       .catch(err => {
         delete this.#hostCache[hostUuid][step]
         throw err
@@ -299,7 +317,7 @@ export default class XapiStats {
     // To avoid crossing over the boundary, we ask for one less step
     const optimumTimestamp = currentTimeStamp - maxDuration + step
     const json = await this._getJson(xapi, host, optimumTimestamp, step)
-    const actualStep = json.meta.step
+    const actualStep = parseNumber(json.meta.step)

     if (actualStep !== step) {
       throw new FaultyGranularity(`Unable to get the true granularity: ${actualStep}`)
@@ -326,9 +344,10 @@ export default class XapiStats {
       return
     }

-    if (stepStats === undefined || stepStats.endTimestamp !== json.meta.end) {
+    const endTimestamp = parseNumber(json.meta.end)
+    if (stepStats === undefined || stepStats.endTimestamp !== endTimestamp) {
       stepStats = {
-        endTimestamp: json.meta.end,
+        endTimestamp,
         interval: actualStep,
         stats: {},
       }
```
```diff
@@ -280,7 +280,7 @@ export default class MigrateVm {
           const stream = vhd.stream()
           await vdi.$importContent(stream, { format: VDI_FORMAT_VHD })
         }
-        return vhd
+        return { vdi, vhd }
       })
     )
   )
```
```diff
@@ -1,5 +1,6 @@
 import asyncMapSettled from '@xen-orchestra/async-map/legacy.js'
 import { basename } from 'path'
+import { createLogger } from '@xen-orchestra/log'
 import { format, parse } from 'xo-remote-parser'
 import {
   DEFAULT_ENCRYPTION_ALGORITHM,
@@ -17,17 +18,35 @@ import { Remotes } from '../models/remote.mjs'

 // ===================================================================

+const { warn } = createLogger('xo:mixins:remotes')
+
 const obfuscateRemote = ({ url, ...remote }) => {
   const parsedUrl = parse(url)
   remote.url = format(sensitiveValues.obfuscate(parsedUrl))
   return remote
 }

-function validatePath(url) {
-  const { path } = parse(url)
+// these properties should be defined on the remote object itself and not as
+// part of the remote URL
+//
+// there is a bug somewhere that keep putting them into the URL, this list
+// is here to help track it
+const INVALID_URL_PARAMS = ['benchmarks', 'id', 'info', 'name', 'proxy', 'enabled', 'error', 'url']
+
+function validateUrl(url) {
+  const parsedUrl = parse(url)
+
+  const { path } = parsedUrl
   if (path !== undefined && basename(path) === 'xo-vm-backups') {
     throw invalidParameters('remote url should not end with xo-vm-backups')
   }
+
+  for (const param of INVALID_URL_PARAMS) {
+    if (Object.hasOwn(parsedUrl, param)) {
+      // log with stack trace
+      warn(new Error('invalid remote URL param ' + param))
+    }
+  }
 }

 export default class {
@@ -182,6 +201,22 @@ export default class {
     if (remote === undefined) {
       throw noSuchObject(id, 'remote')
     }
+
+    const parsedUrl = parse(remote.url)
+    let fixed = false
+    for (const param of INVALID_URL_PARAMS) {
+      if (Object.hasOwn(parsedUrl, param)) {
+        // delete the value to trace its real origin when it's added back
+        // with `updateRemote()`
+        delete parsedUrl[param]
+        fixed = true
+      }
+    }
+    if (fixed) {
+      remote.url = format(parsedUrl)
+      this._remotes.update(remote).catch(warn)
+    }

     return remote
   }

@@ -202,7 +237,7 @@ export default class {
   }

   async createRemote({ name, options, proxy, url }) {
-    validatePath(url)
+    validateUrl(url)

     const params = {
       enabled: false,
@@ -219,6 +254,10 @@ export default class {
   }

   updateRemote(id, { enabled, name, options, proxy, url }) {
+    if (url !== undefined) {
+      validateUrl(url)
+    }

     const handlers = this._handlers
     const handler = handlers[id]
     if (handler !== undefined) {
@@ -238,7 +277,7 @@ export default class {
   @synchronized()
   async _updateRemote(id, { url, ...props }) {
     if (url !== undefined) {
-      validatePath(url)
+      validateUrl(url)
     }

     const remote = await this._getRemote(id)
```
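The read-time scrubbing added to `_getRemote` boils down to a parse/strip/format round trip with `xo-remote-parser`. A standalone version, using the same `INVALID_URL_PARAMS` list as above (the xo-web hunk near the end of this diff fixes the actual leak: `_changeUrlElement` spread the whole remote object into the URL instead of `parse(remote.url)`):

```js
import { format, parse } from 'xo-remote-parser'

const INVALID_URL_PARAMS = ['benchmarks', 'id', 'info', 'name', 'proxy', 'enabled', 'error', 'url']

function scrubRemoteUrl(url) {
  const parsedUrl = parse(url)
  for (const param of INVALID_URL_PARAMS) {
    // drop properties that leaked into the URL; they belong on the remote
    // object itself, not in its URL
    delete parsedUrl[param]
  }
  return format(parsedUrl)
}
```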
```diff
@@ -253,6 +253,10 @@ export default class RestApi {
       const host = req.xapiObject
       res.json(await host.$xapi.listMissingPatches(host))
     },
+
+    async smt({ xapiObject }, res) {
+      res.json({ enabled: await xapiObject.$xapi.isHyperThreadingEnabled(xapiObject.$id) })
+    },
   }

   collections.pools.routes = {
```
```diff
@@ -99,7 +99,7 @@ test('An ova file is generated correctly', async () => {
   try {
     await execXmllint(xml, [
       '--schema',
-      path.join(__dirname, 'ova-schema', 'dsp8023_1.1.1.xsd'),
+      path.join(__dirname, '..', 'src', 'ova-schema', 'dsp8023_1.1.1.xsd'),
       '--noout',
       '--nonet',
       '-',
```
```diff
@@ -138,7 +138,7 @@ export class Range extends Component {

 export Toggle from './toggle'

-const UNITS = ['kiB', 'MiB', 'GiB']
+const UNITS = ['kiB', 'MiB', 'GiB', 'TiB', 'PiB']
 const DEFAULT_UNIT = 'GiB'

 export class SizeInput extends BaseComponent {
```
```diff
@@ -54,13 +54,9 @@ export default class IsoDevice extends Component {
     () => this.props.vm.$pool,
     () => this.props.vm.$container,
     (vmPool, vmContainer) => sr => {
-      const vmRunning = vmContainer !== vmPool
-      const sameHost = vmContainer === sr.$container
-      const samePool = vmPool === sr.$pool
-
       return (
-        samePool &&
-        (vmRunning ? sr.shared || sameHost : true) &&
+        vmPool === sr.$pool &&
+        (sr.shared || vmContainer === sr.$container) &&
         (sr.SR_type === 'iso' || (sr.SR_type === 'udev' && sr.size))
       )
     }
```
```diff
@@ -75,7 +75,7 @@ export const reportOnSupportPanel = async ({ files = [], formatMessage = identit
     ADDITIONAL_FILES.map(({ fetch, name }) =>
       timeout.call(fetch(), ADDITIONAL_FILES_FETCH_TIMEOUT).then(
         file => formData.append('attachments', createBlobFromString(file), name),
-        error => logger.warn(`cannot get ${name}`, error)
+        error => logger.warn(`cannot get ${name}`, { error })
       )
     )
   )
```
```diff
@@ -3,7 +3,6 @@ import _ from 'intl'
 import Copiable from 'copiable'
 import decorate from 'apply-decorators'
 import Icon from 'icon'
-import map from 'lodash/map'
 import React from 'react'
 import store from 'store'
 import HomeTags from 'home-tags'
@@ -24,10 +23,21 @@ export default decorate([
   provideState({
     computed: {
       areHostsVersionsEqual: ({ areHostsVersionsEqualByPool }, { host }) => areHostsVersionsEqualByPool[host.$pool],
+      inMemoryVms: (_, { vms }) => {
+        const result = []
+        for (const key of Object.keys(vms)) {
+          const vm = vms[key]
+          const { power_state } = vm
+          if (power_state === 'Running' || power_state === 'Paused') {
+            result.push(vm)
+          }
+        }
+        return result
+      },
     },
   }),
   injectState,
-  ({ statsOverview, host, nVms, vmController, vms, state: { areHostsVersionsEqual } }) => {
+  ({ statsOverview, host, nVms, vmController, state: { areHostsVersionsEqual, inMemoryVms } }) => {
     const pool = getObject(store.getState(), host.$pool)
     const vmsFilter = encodeURIComponent(new CM.Property('$container', new CM.String(host.id)).toString())
     return (
@@ -120,7 +130,7 @@ export default decorate([
           tooltip={`${host.productBrand} (${formatSize(vmController.memory.size)})`}
           value={vmController.memory.size}
         />
-        {map(vms, vm => (
+        {inMemoryVms.map(vm => (
           <UsageElement
             tooltip={`${vm.name_label} (${formatSize(vm.memory.size)})`}
             key={vm.id}
```
```diff
@@ -33,7 +33,7 @@ const formatError = error => (typeof error === 'string' ? error : JSON.stringify

 const _changeUrlElement = (value, { remote, element }) =>
   editRemote(remote, {
-    url: format({ ...remote, [element]: value === null ? undefined : value }),
+    url: format({ ...parse(remote.url), [element]: value === null ? undefined : value }),
   })
 const _showError = remote => alert(_('remoteConnectionFailed'), <pre>{formatError(remote.error)}</pre>)
 const _editRemoteName = (name, { remote }) => editRemote(remote, { name })
```