Compare commits

..

2 Commits

Author SHA1 Message Date
Julien Fontanet
402bdbf656 WiP: refactor(xo-server/rest-api): declarative approach 2023-11-14 15:31:43 +01:00
Julien Fontanet
d27698d131 fix(proxy/appliance): fix warning message 2023-11-14 10:12:41 +01:00
114 changed files with 1951 additions and 4986 deletions

View File

@@ -42,8 +42,8 @@ function get(node, i, keys) {
? node.value
: node
: node instanceof Node
? get(node.children.get(keys[i]), i + 1, keys)
: undefined
? get(node.children.get(keys[i]), i + 1, keys)
: undefined
}
function set(node, i, keys, value) {

View File

@@ -1,7 +1,7 @@
import assert from 'node:assert'
import { Socket } from 'node:net'
import { connect } from 'node:tls'
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
import { readChunkStrict } from '@vates/read-chunk'
import { createLogger } from '@xen-orchestra/log'
@@ -21,7 +21,6 @@ import {
OPTS_MAGIC,
NBD_CMD_DISC,
} from './constants.mjs'
import { Readable } from 'node:stream'
const { warn } = createLogger('vates:nbd-client')
@@ -41,7 +40,6 @@ export default class NbdClient {
#readBlockRetries
#reconnectRetry
#connectTimeout
#messageTimeout
// AFAIK, there is no guaranty the server answers in the same order as the queries
// so we handle a backlog of command waiting for response and handle concurrency manually
@@ -54,14 +52,7 @@ export default class NbdClient {
#reconnectingPromise
constructor(
{ address, port = NBD_DEFAULT_PORT, exportname, cert },
{
connectTimeout = 6e4,
messageTimeout = 6e4,
waitBeforeReconnect = 1e3,
readAhead = 10,
readBlockRetries = 5,
reconnectRetry = 5,
} = {}
{ connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
) {
this.#serverAddress = address
this.#serverPort = port
@@ -72,7 +63,6 @@ export default class NbdClient {
this.#readBlockRetries = readBlockRetries
this.#reconnectRetry = reconnectRetry
this.#connectTimeout = connectTimeout
this.#messageTimeout = messageTimeout
}
get exportSize() {
@@ -126,24 +116,12 @@ export default class NbdClient {
return
}
const queryId = this.#nextCommandQueryId
this.#nextCommandQueryId++
const buffer = Buffer.alloc(28)
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
buffer.writeInt16BE(0, 4) // no command flags for a disconnect
buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
buffer.writeBigUInt64BE(queryId, 8)
buffer.writeBigUInt64BE(0n, 16)
buffer.writeInt32BE(0, 24)
const promise = pFromCallback(cb => {
this.#serverSocket.end(buffer, 'utf8', cb)
})
try {
await pTimeout.call(promise, this.#messageTimeout)
} catch (error) {
this.#serverSocket.destroy()
}
await this.#write(buffer)
await this.#serverSocket.destroy()
this.#serverSocket = undefined
this.#connected = false
}
@@ -217,13 +195,11 @@ export default class NbdClient {
}
#read(length) {
const promise = readChunkStrict(this.#serverSocket, length)
return pTimeout.call(promise, this.#messageTimeout)
return readChunkStrict(this.#serverSocket, length)
}
#write(buffer) {
const promise = fromCallback.call(this.#serverSocket, 'write', buffer)
return pTimeout.call(promise, this.#messageTimeout)
return fromCallback.call(this.#serverSocket, 'write', buffer)
}
async #readInt32() {
@@ -256,20 +232,19 @@ export default class NbdClient {
}
try {
this.#waitingForResponse = true
const buffer = await this.#read(16)
const magic = buffer.readInt32BE(0)
const magic = await this.#readInt32()
if (magic !== NBD_REPLY_MAGIC) {
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
}
const error = buffer.readInt32BE(4)
const error = await this.#readInt32()
if (error !== 0) {
// @todo use error code from constants.mjs
throw new Error(`GOT ERROR CODE : ${error}`)
}
const blockQueryId = buffer.readBigUInt64BE(8)
const blockQueryId = await this.#readInt64()
const query = this.#commandQueryBacklog.get(blockQueryId)
if (!query) {
throw new Error(` no query associated with id ${blockQueryId}`)
@@ -306,13 +281,7 @@ export default class NbdClient {
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
buffer.writeBigUInt64BE(queryId, 8)
// byte offset in the raw disk
const offset = BigInt(index) * BigInt(size)
const remaining = this.#exportSize - offset
if (remaining < BigInt(size)) {
size = Number(remaining)
}
buffer.writeBigUInt64BE(offset, 16)
buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
buffer.writeInt32BE(size, 24)
return new Promise((resolve, reject) => {
@@ -338,15 +307,14 @@ export default class NbdClient {
})
}
async *readBlocks(indexGenerator = 2 * 1024 * 1024) {
async *readBlocks(indexGenerator) {
// default : read all blocks
if (typeof indexGenerator === 'number') {
const exportSize = Number(this.#exportSize)
const chunkSize = indexGenerator
if (indexGenerator === undefined) {
const exportSize = this.#exportSize
const chunkSize = 2 * 1024 * 1024
indexGenerator = function* () {
const nbBlocks = Math.ceil(exportSize / chunkSize)
for (let index = 0; index < nbBlocks; index++) {
const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
for (let index = 0; BigInt(index) < nbBlocks; index++) {
yield { index, size: chunkSize }
}
}
@@ -380,15 +348,4 @@ export default class NbdClient {
yield readAhead.shift()
}
}
stream(chunkSize) {
async function* iterator() {
for await (const chunk of this.readBlocks(chunkSize)) {
yield chunk
}
}
// create a readable stream instead of returning the iterator
// since iterators don't like unshift and partial reading
return Readable.from(iterator())
}
}

View File

@@ -22,41 +22,41 @@ const readChunk = (stream, size) =>
stream.errored != null
? Promise.reject(stream.errored)
: stream.closed || stream.readableEnded
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)
? Promise.resolve(null)
: new Promise((resolve, reject) => {
if (size !== undefined) {
assert(size > 0)
// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}
// per Node documentation:
// > The size argument must be less than or equal to 1 GiB.
assert(size < 1073741824)
}
function onEnd() {
resolve(null)
function onEnd() {
resolve(null)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read(size)
if (data !== null) {
resolve(data)
removeListeners()
}
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
exports.readChunk = readChunk
/**
@@ -111,42 +111,42 @@ async function skip(stream, size) {
return stream.errored != null
? Promise.reject(stream.errored)
: size === 0 || stream.closed || stream.readableEnded
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}
resolve(size)
removeListeners()
? Promise.resolve(0)
: new Promise((resolve, reject) => {
let left = size
function onEnd() {
resolve(size - left)
removeListeners()
}
function onError(error) {
reject(error)
removeListeners()
}
function onReadable() {
const data = stream.read()
left -= data === null ? 0 : data.length
if (left > 0) {
// continue to read
} else {
// if more than wanted has been read, push back the rest
if (left < 0) {
stream.unshift(data.slice(left))
}
resolve(size)
removeListeners()
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
function removeListeners() {
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
stream.removeListener('readable', onReadable)
}
stream.on('end', onEnd)
stream.on('error', onError)
stream.on('readable', onReadable)
onReadable()
})
}
exports.skip = skip

View File

@@ -5,16 +5,6 @@ import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
async function resolveUuid(xapi, cache, uuid, type) {
if (uuid == null) {
return uuid
}
const ref = cache.get(uuid)
if (ref === undefined) {
cache.set(uuid, xapi.call(`${type}.get_by_uuid`, uuid))
}
return cache.get(uuid)
}
export class ImportVmBackup {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
this._adapter = adapter
@@ -24,29 +14,13 @@ export class ImportVmBackup {
this._xapi = xapi
}
async #decorateIncrementalVmMetadata(backup) {
const { mapVdisSrs } = this._importIncrementalVmSettings
const xapi = this._xapi
const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const sr = await resolveUuid(xapi, cache, this._srUuid, 'SR')
Object.values(backup.vdis).forEach(vdi => {
vdi.SR = mapVdisSrRefs[vdi.uuid] ?? sr.$ref
})
return backup
}
async run() {
const adapter = this._adapter
const metadata = this._metadata
const isFull = metadata.mode === 'full'
const sizeContainer = { size: 0 }
const { mapVdisSrs, newMacAddresses } = this._importIncrementalVmSettings
let backup
if (isFull) {
backup = await adapter.readFullVmBackup(metadata)
@@ -55,11 +29,11 @@ export class ImportVmBackup {
assert.strictEqual(metadata.mode, 'delta')
const ignoredVdis = new Set(
Object.entries(mapVdisSrs)
Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
.filter(([_, srUuid]) => srUuid === null)
.map(([vdiUuid]) => vdiUuid)
)
backup = await this.#decorateIncrementalVmMetadata(await adapter.readIncrementalVmBackup(metadata, ignoredVdis))
backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
}
@@ -74,7 +48,8 @@ export class ImportVmBackup {
const vmRef = isFull
? await xapi.VM_import(backup, srRef)
: await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
newMacAddresses,
...this._importIncrementalVmSettings,
detectBase: false,
})
await Promise.all([
@@ -84,13 +59,6 @@ export class ImportVmBackup {
vmRef,
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
),
xapi.call(
'VM.set_name_description',
vmRef,
`Restored on ${formatFilenameDate(+new Date())} from ${adapter._handler._remote.name} -
${metadata.vm.name_description}
`
),
])
return {

View File

@@ -1,3 +1,4 @@
import find from 'lodash/find.js'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
@@ -11,18 +12,24 @@ import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'
// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM
export const TAG_BASE_DELTA = 'xo:base_delta'
// in `other_config` of an incrementally replicated VM, contains the UUID of the target SR used for replication
//
// added after the complete replication
export const TAG_BACKUP_SR = 'xo:backup:sr'
// in other_config of VDIs of an incrementally replicated VM, contains the UUID of the source VDI
export const TAG_COPY_SRC = 'xo:copy_of'
const TAG_BACKUP_SR = 'xo:backup:sr'
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
if (uuid == null) {
return uuid
}
let ref = cache.get(uuid)
if (ref === undefined) {
ref = await xapi.call(`${type}.get_by_uuid`, uuid)
cache.set(uuid, ref)
}
return ref
}
export async function exportIncrementalVm(
vm,
@@ -140,7 +147,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
$defer,
incrementalVm,
sr,
{ cancelToken = CancelToken.none, newMacAddresses = false } = {}
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
) {
const { version } = incrementalVm
if (compareVersions(version, '1.0.0') < 0) {
@@ -150,6 +157,35 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vmRecord = incrementalVm.vm
const xapi = sr.$xapi
let baseVm
if (detectBase) {
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(
xapi.objects.all,
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}
const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
const vdiRecords = incrementalVm.vdis
// 0. Create suspend_VDI
@@ -161,7 +197,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
})
} else {
suspendVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
suspendVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => suspendVdi.$destroy())
}
}
@@ -179,6 +226,10 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
ha_always_run: false,
is_a_template: false,
name_label: '[Importing…] ' + vmRecord.name_label,
other_config: {
...vmRecord.other_config,
[TAG_COPY_SRC]: vmRecord.uuid,
},
},
{
bios_strings: vmRecord.bios_strings,
@@ -199,8 +250,14 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
const vdi = vdiRecords[vdiRef]
let newVdi
if (vdi.baseVdi !== undefined) {
newVdi = await xapi.getRecord('VDI', await vdi.baseVdi.$clone())
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
@@ -211,7 +268,18 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
// suspendVDI has already created
newVdi = suspendVdi
} else {
newVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
newVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => newVdi.$destroy())
}
@@ -256,9 +324,7 @@ export const importIncrementalVm = defer(async function importIncrementalVm(
if (stream.length === undefined) {
stream = await createVhdStreamWithLength(stream)
}
await xapi.setField('VDI', vdi.$ref, 'name_label', `[Importing] ${vdiRecords[id].name_label}`)
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
await xapi.setField('VDI', vdi.$ref, 'name_label', vdiRecords[id].name_label)
}
}),

View File

@@ -1,11 +1,11 @@
import cloneDeep from 'lodash/cloneDeep.js'
import mapValues from 'lodash/mapValues.js'
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
export function forkDeltaExport(deltaExport) {
const { streams, ...rest } = deltaExport
const newMetadata = cloneDeep(rest)
newMetadata.streams = mapValues(streams, forkStreamUnpipe)
return newMetadata
return Object.create(deltaExport, {
streams: {
value: mapValues(deltaExport.streams, forkStreamUnpipe),
},
})
}

View File

@@ -11,7 +11,6 @@ import { dirname } from 'node:path'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
@@ -196,7 +195,7 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
assert.notStrictEqual(
parentPath,
undefined,
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config['xo:base_delta']}`
)
parentPath = parentPath.slice(1) // remove leading slash

View File

@@ -4,13 +4,12 @@ import { formatDateTime } from '@xen-orchestra/xapi'
import { formatFilenameDate } from '../../_filenameDate.mjs'
import { getOldEntries } from '../../_getOldEntries.mjs'
import { importIncrementalVm, TAG_BACKUP_SR, TAG_BASE_DELTA, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { importIncrementalVm, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
import { Task } from '../../Task.mjs'
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
import { listReplicatedVms } from './_listReplicatedVms.mjs'
import find from 'lodash/find.js'
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
@@ -82,54 +81,6 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
}
#decorateVmMetadata(backup) {
const { _warmMigration } = this._settings
const sr = this._sr
const xapi = sr.$xapi
const vm = backup.vm
vm.other_config[TAG_COPY_SRC] = vm.uuid
const remoteBaseVmUuid = vm.other_config[TAG_BASE_DELTA]
let baseVm
if (remoteBaseVmUuid) {
baseVm = find(
xapi.objects.all,
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
const baseVdis = {}
baseVm?.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
vm.other_config[TAG_COPY_SRC] = vm.uuid
if (!_warmMigration) {
vm.tags.push('Continuous Replication')
}
Object.values(backup.vdis).forEach(vdi => {
vdi.other_config[TAG_COPY_SRC] = vdi.uuid
vdi.SR = sr.$ref
// vdi.other_config[TAG_BASE_DELTA] is never defined on a suspend vdi
if (vdi.other_config[TAG_BASE_DELTA]) {
const remoteBaseVdiUuid = vdi.other_config[TAG_BASE_DELTA]
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
vdi.baseVdi = baseVdi
}
})
return backup
}
async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
const { _warmMigration } = this._settings
const sr = this._sr
@@ -140,7 +91,16 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
let targetVmRef
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await importIncrementalVm(this.#decorateVmMetadata(deltaExport), sr)
targetVmRef = await importIncrementalVm(
{
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
@@ -161,13 +121,13 @@ export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWr
)
),
targetVm.update_other_config({
[TAG_BACKUP_SR]: srUuid,
'xo:backup:sr': srUuid,
// these entries need to be added in case of offline backup
'xo:backup:datetime': formatDateTime(timestamp),
'xo:backup:job': job.id,
'xo:backup:schedule': scheduleId,
[TAG_BASE_DELTA]: vm.uuid,
'xo:backup:vm': vm.uuid,
}),
])
}

View File

@@ -1,10 +1,11 @@
#!/usr/bin/env node
import { defer } from 'golike-defer'
import { readFileSync } from 'fs'
import { Ref, Xapi } from 'xen-api'
'use strict'
const pkg = JSON.parse(readFileSync(new URL('./package.json', import.meta.url)))
const { Ref, Xapi } = require('xen-api')
const { defer } = require('golike-defer')
const pkg = require('./package.json')
Xapi.prototype.getVmDisks = async function (vm) {
const disks = { __proto__: null }

View File

@@ -10,10 +10,10 @@
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=10"
"node": ">=8"
},
"bin": {
"xo-cr-seed": "./index.mjs"
"xo-cr-seed": "./index.js"
},
"preferGlobal": true,
"dependencies": {

View File

@@ -4,8 +4,6 @@
- Explicit error if users attempt to connect from a slave host (PR [#7110](https://github.com/vatesfr/xen-orchestra/pull/7110))
- More compact UI (PR [#7159](https://github.com/vatesfr/xen-orchestra/pull/7159))
- Fix dashboard host patches list (PR [#7169](https://github.com/vatesfr/xen-orchestra/pull/7169))
- Ability to export selected VMs (PR [#7174](https://github.com/vatesfr/xen-orchestra/pull/7174))
## **0.1.5** (2023-11-07)

View File

@@ -44,15 +44,9 @@ const props = defineProps<{
}>();
const sortedPatches = computed(() =>
[...props.patches].sort((patch1, patch2) => {
if (patch1.changelog == null) {
return 1;
} else if (patch2.changelog == null) {
return -1;
}
return patch1.changelog.date - patch2.changelog.date;
})
[...props.patches].sort(
(patch1, patch2) => patch1.changelog.date - patch2.changelog.date
)
);
const { isDesktop } = useUiStore();

View File

@@ -125,9 +125,7 @@ const emit = defineEmits<{
const model = useVModel(props, "modelValue", emit);
const openRawValueModal = (code: string) =>
useModal(() => import("@/components/modals/CodeHighlightModal.vue"), {
code,
});
useModal(() => import("@/components/CodeHighlight.vue"), { code });
</script>
<style lang="postcss" scoped>

View File

@@ -1,80 +0,0 @@
<template>
<FormInputGroup>
<FormNumber v-model="sizeInput" :max-decimals="3" />
<FormSelect v-model="prefixInput">
<option
v-for="currentPrefix in availablePrefixes"
:key="currentPrefix"
:value="currentPrefix"
>
{{ currentPrefix }}B
</option>
</FormSelect>
</FormInputGroup>
</template>
<script lang="ts" setup>
import FormInputGroup from "@/components/form/FormInputGroup.vue";
import FormNumber from "@/components/form/FormNumber.vue";
import FormSelect from "@/components/form/FormSelect.vue";
import { useVModel } from "@vueuse/core";
import humanFormat, { type Prefix } from "human-format";
import { ref, watch } from "vue";
const props = defineProps<{
modelValue: number | undefined;
}>();
const emit = defineEmits<{
(event: "update:modelValue", value: number): number;
}>();
const availablePrefixes: Prefix<"binary">[] = ["Ki", "Mi", "Gi"];
const model = useVModel(props, "modelValue", emit, {
shouldEmit: (value) => value !== props.modelValue,
});
const sizeInput = ref();
const prefixInput = ref();
const scale = humanFormat.Scale.create(availablePrefixes, 1024, 1);
watch([sizeInput, prefixInput], ([newSize, newPrefix]) => {
if (newSize === "" || newSize === undefined) {
return;
}
model.value = humanFormat.parse(`${newSize || 0} ${newPrefix || "Ki"}`, {
scale,
});
});
watch(
() => props.modelValue,
(newValue) => {
if (newValue === undefined) {
sizeInput.value = undefined;
if (prefixInput.value === undefined) {
prefixInput.value = availablePrefixes[0];
}
return;
}
const { value, prefix } = humanFormat.raw(newValue, {
scale,
prefix: prefixInput.value,
});
console.log(value);
sizeInput.value = value;
if (value !== 0) {
prefixInput.value = prefix;
}
},
{ immediate: true }
);
</script>

View File

@@ -1,77 +0,0 @@
<template>
<FormInput v-model="localValue" inputmode="decimal" />
</template>
<script lang="ts" setup>
import FormInput from "@/components/form/FormInput.vue";
import { computed, ref, watch } from "vue";
const props = defineProps<{
modelValue: number | undefined;
maxDecimals?: number;
}>();
const emit = defineEmits<{
(event: "update:modelValue", value: number | undefined): void;
}>();
const localValue = ref("");
const hasTrailingDot = ref(false);
const cleaningRegex = computed(() => {
if (props.maxDecimals === undefined) {
// Any number with optional decimal part
return /(\d*\.?\d*)/;
}
if (props.maxDecimals > 0) {
// Numbers with up to `props.maxDecimals` decimal places
return new RegExp(`(\\d*\\.?\\d{0,${props.maxDecimals}})`);
}
// Integer numbers only
return /(\d*)/;
});
watch(
localValue,
(newLocalValue) => {
const cleanValue =
localValue.value
.replace(",", ".")
.replace(/[^0-9.]/g, "")
.match(cleaningRegex.value)?.[0] ?? "";
hasTrailingDot.value = cleanValue.endsWith(".");
if (cleanValue !== newLocalValue) {
localValue.value = cleanValue;
return;
}
if (newLocalValue === "") {
emit("update:modelValue", undefined);
return;
}
const parsedValue = parseFloat(cleanValue);
emit(
"update:modelValue",
Number.isNaN(parsedValue) ? undefined : parsedValue
);
},
{ flush: "post" }
);
watch(
() => props.modelValue,
(newModelValue) => {
localValue.value = `${newModelValue?.toString() ?? ""}${
hasTrailingDot.value ? "." : ""
}`;
},
{ immediate: true }
);
</script>

View File

@@ -12,6 +12,6 @@ import BasicModalLayout from "@/components/ui/modals/layouts/BasicModalLayout.vu
import UiModal from "@/components/ui/modals/UiModal.vue";
defineProps<{
code: any;
code: string;
}>();
</script>

View File

@@ -1,56 +0,0 @@
<template>
<UiModal>
<FormModalLayout :icon="faDisplay">
<template #title>
{{ $t("export-n-vms-manually", { n: labelWithUrl.length }) }}
</template>
<p>
{{ $t("export-vms-manually-information") }}
</p>
<ul class="list">
<li v-for="({ url, label }, index) in labelWithUrl" :key="index">
<a :href="url.href" target="_blank">
{{ label }}
</a>
</li>
</ul>
<template #buttons>
<ModalDeclineButton />
</template>
</FormModalLayout>
</UiModal>
</template>
<script lang="ts" setup>
import FormModalLayout from "@/components/ui/modals/layouts/FormModalLayout.vue";
import ModalDeclineButton from "@/components/ui/modals/ModalDeclineButton.vue";
import UiModal from "@/components/ui/modals/UiModal.vue";
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
import { useVmCollection } from "@/stores/xen-api/vm.store";
import { faDisplay } from "@fortawesome/free-solid-svg-icons";
import { computed } from "vue";
const props = defineProps<{
blockedUrls: URL[];
}>();
const { getByOpaqueRef } = useVmCollection();
const labelWithUrl = computed(() =>
props.blockedUrls.map((url) => {
const ref = url.searchParams.get("ref") as XenApiVm["$ref"];
return {
url: url,
label: getByOpaqueRef(ref)?.name_label ?? ref,
};
})
);
</script>
<style lang="postcss" scoped>
.list {
margin-top: 2rem;
}
</style>

View File

@@ -1,65 +0,0 @@
<template>
<UiModal @submit.prevent="handleSubmit">
<FormModalLayout :icon="faDisplay">
<template #title>
{{ $t("export-n-vms", { n: vmRefs.length }) }}
</template>
<FormInputWrapper
light
learn-more-url="https://xcp-ng.org/blog/2018/12/19/zstd-compression-for-xcp-ng/"
:label="$t('select-compression')"
>
<FormSelect v-model="compressionType">
<option
v-for="key in Object.keys(VM_COMPRESSION_TYPE)"
:key="key"
:value="
VM_COMPRESSION_TYPE[key as keyof typeof VM_COMPRESSION_TYPE]
"
>
{{ $t(key.toLowerCase()) }}
</option>
</FormSelect>
</FormInputWrapper>
<template #buttons>
<ModalDeclineButton />
<ModalApproveButton>
{{ $t("export-n-vms", { n: vmRefs.length }) }}
</ModalApproveButton>
</template>
</FormModalLayout>
</UiModal>
</template>
<script lang="ts" setup>
import { faDisplay } from "@fortawesome/free-solid-svg-icons";
import { inject, ref } from "vue";
import FormInputWrapper from "@/components/form/FormInputWrapper.vue";
import FormSelect from "@/components/form/FormSelect.vue";
import FormModalLayout from "@/components/ui/modals/layouts/FormModalLayout.vue";
import ModalApproveButton from "@/components/ui/modals/ModalApproveButton.vue";
import ModalDeclineButton from "@/components/ui/modals/ModalDeclineButton.vue";
import UiModal from "@/components/ui/modals/UiModal.vue";
import { IK_MODAL } from "@/types/injection-keys";
import { useXenApiStore } from "@/stores/xen-api.store";
import { VM_COMPRESSION_TYPE } from "@/libs/xen-api/xen-api.enums";
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
const props = defineProps<{
vmRefs: XenApiVm["$ref"][];
}>();
const modal = inject(IK_MODAL)!;
const compressionType = ref(VM_COMPRESSION_TYPE.DISABLED);
const handleSubmit = () => {
const xenApi = useXenApiStore().getXapi();
xenApi.vm.export(props.vmRefs, compressionType.value);
modal.approve();
};
</script>

View File

@@ -1,50 +1,51 @@
<template>
<MenuItem
v-tooltip="
vmRefs.length > 0 &&
!isSomeExportable &&
$t('no-selected-vm-can-be-exported')
"
:icon="faDisplay"
:disabled="isDisabled"
@click="openModal"
>
{{ $t("export-vms") }}
<MenuItem :icon="faFileExport">
{{ $t("export") }}
<template #submenu>
<MenuItem
v-tooltip="{ content: $t('coming-soon'), placement: 'left' }"
:icon="faDisplay"
>
{{ $t("export-vms") }}
</MenuItem>
<MenuItem
:icon="faCode"
@click="
exportVmsAsJsonFile(vms, `vms_${new Date().toISOString()}.json`)
"
>
{{ $t("export-table-to", { type: ".json" }) }}
</MenuItem>
<MenuItem
:icon="faFileCsv"
@click="exportVmsAsCsvFile(vms, `vms_${new Date().toISOString()}.csv`)"
>
{{ $t("export-table-to", { type: ".csv" }) }}
</MenuItem>
</template>
</MenuItem>
</template>
<script lang="ts" setup>
import { computed } from "vue";
import { faDisplay } from "@fortawesome/free-solid-svg-icons";
import MenuItem from "@/components/menu/MenuItem.vue";
import { DisabledContext } from "@/context";
import { useContext } from "@/composables/context.composable";
import { useModal } from "@/composables/modal.composable";
import { useVmCollection } from "@/stores/xen-api/vm.store";
import { VM_OPERATION } from "@/libs/xen-api/xen-api.enums";
import { computed } from "vue";
import { exportVmsAsCsvFile, exportVmsAsJsonFile } from "@/libs/vm";
import MenuItem from "@/components/menu/MenuItem.vue";
import {
faCode,
faDisplay,
faFileCsv,
faFileExport,
} from "@fortawesome/free-solid-svg-icons";
import { vTooltip } from "@/directives/tooltip.directive";
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
const props = defineProps<{ vmRefs: XenApiVm["$ref"][] }>();
const props = defineProps<{
vmRefs: XenApiVm["$ref"][];
}>();
const { getByOpaqueRefs, areSomeOperationAllowed } = useVmCollection();
const isParentDisabled = useContext(DisabledContext);
const isSomeExportable = computed(() =>
getByOpaqueRefs(props.vmRefs).some((vm) =>
areSomeOperationAllowed(vm, VM_OPERATION.EXPORT)
)
const { getByOpaqueRef: getVm } = useVmCollection();
const vms = computed(() =>
props.vmRefs.map(getVm).filter((vm): vm is XenApiVm => vm !== undefined)
);
const isDisabled = computed(
() => isParentDisabled.value || !isSomeExportable.value
);
const openModal = () => {
useModal(() => import("@/components/modals/VmExportModal.vue"), {
vmRefs: props.vmRefs,
});
};
</script>

View File

@@ -1,45 +0,0 @@
<template>
<MenuItem :icon="faFileExport">
{{ $t("export") }}
<template #submenu>
<VmActionExportItem :vmRefs="vmRefs" />
<MenuItem
:icon="faCode"
@click="
exportVmsAsJsonFile(vms, `vms_${new Date().toISOString()}.json`)
"
>
{{ $t("export-table-to", { type: ".json" }) }}
</MenuItem>
<MenuItem
:icon="faFileCsv"
@click="exportVmsAsCsvFile(vms, `vms_${new Date().toISOString()}.csv`)"
>
{{ $t("export-table-to", { type: ".csv" }) }}
</MenuItem>
</template>
</MenuItem>
</template>
<script lang="ts" setup>
import { useVmCollection } from "@/stores/xen-api/vm.store";
import { computed } from "vue";
import { exportVmsAsCsvFile, exportVmsAsJsonFile } from "@/libs/vm";
import MenuItem from "@/components/menu/MenuItem.vue";
import VmActionExportItem from "@/components/vm/VmActionItems/VmActionExportItem.vue";
import {
faCode,
faFileCsv,
faFileExport,
} from "@fortawesome/free-solid-svg-icons";
import type { XenApiVm } from "@/libs/xen-api/xen-api.types";
const props = defineProps<{
vmRefs: XenApiVm["$ref"][];
}>();
const { getByOpaqueRef: getVm } = useVmCollection();
const vms = computed(() =>
props.vmRefs.map(getVm).filter((vm): vm is XenApiVm => vm !== undefined)
);
</script>

View File

@@ -21,7 +21,7 @@
{{ $t("edit-config") }}
</MenuItem>
<VmActionSnapshotItem :vm-refs="selectedRefs" />
<VmActionExportItems :vm-refs="selectedRefs" />
<VmActionExportItem :vm-refs="selectedRefs" />
<VmActionDeleteItem :vm-refs="selectedRefs" />
</AppMenu>
</template>
@@ -32,7 +32,7 @@ import MenuItem from "@/components/menu/MenuItem.vue";
import UiButton from "@/components/ui/UiButton.vue";
import VmActionCopyItem from "@/components/vm/VmActionItems/VmActionCopyItem.vue";
import VmActionDeleteItem from "@/components/vm/VmActionItems/VmActionDeleteItem.vue";
import VmActionExportItems from "@/components/vm/VmActionItems/VmActionExportItems.vue";
import VmActionExportItem from "@/components/vm/VmActionItems/VmActionExportItem.vue";
import VmActionMigrateItem from "@/components/vm/VmActionItems/VmActionMigrateItem.vue";
import VmActionPowerStateItems from "@/components/vm/VmActionItems/VmActionPowerStateItems.vue";
import VmActionSnapshotItem from "@/components/vm/VmActionItems/VmActionSnapshotItem.vue";

View File

@@ -82,8 +82,8 @@ const testMetric = (
typeof test === "string"
? test === type
: typeof test === "function"
? test(type)
: test.exec(type);
? test(type)
: test.exec(type);
const findMetric = (metrics: any, metricType: string) => {
let testResult;

View File

@@ -491,9 +491,3 @@ export enum CERTIFICATE_TYPE {
HOST = "host",
HOST_INTERNAL = "host_internal",
}
export enum VM_COMPRESSION_TYPE {
DISABLED = "false",
GZIP = "true",
ZSTD = "zstd",
}

View File

@@ -18,8 +18,6 @@ import type {
import { buildXoObject, typeToRawType } from "@/libs/xen-api/xen-api.utils";
import { JSONRPCClient } from "json-rpc-2.0";
import { castArray } from "lodash-es";
import type { VM_COMPRESSION_TYPE } from "@/libs/xen-api/xen-api.enums";
import { useModal } from "@/composables/modal.composable";
export default class XenApi {
private client: JSONRPCClient;
@@ -29,12 +27,10 @@ export default class XenApi {
Set<(...args: any[]) => void>
>();
private fromToken: string | undefined;
private hostUrl: string;
constructor(hostUrl: string) {
this.hostUrl = hostUrl;
this.client = new JSONRPCClient(async (request) => {
const response = await fetch(`${this.hostUrl}/jsonrpc`, {
const response = await fetch(`${hostUrl}/jsonrpc`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify(request),
@@ -384,36 +380,6 @@ export default class XenApi {
)
);
},
export: (vmRefs: VmRefs, compression: VM_COMPRESSION_TYPE) => {
const blockedUrls: URL[] = [];
castArray(vmRefs).forEach((vmRef) => {
const url = new URL(this.hostUrl);
url.pathname = "/export/";
url.search = new URLSearchParams({
session_id: this.sessionId!,
ref: vmRef,
use_compression: compression,
}).toString();
const _window = window.open(url.href, "_blank");
if (_window === null) {
blockedUrls.push(url);
} else {
URL.revokeObjectURL(url.toString());
}
});
if (blockedUrls.length > 0) {
const { onClose } = useModal(
() => import("@/components/modals/VmExportBlockedUrlsModal.vue"),
{ blockedUrls }
);
onClose(() =>
blockedUrls.forEach((url) => URL.revokeObjectURL(url.toString()))
);
}
},
};
}
}

View File

@@ -43,7 +43,6 @@
"delete-vms": "Delete 1 VM | Delete {n} VMs",
"descending": "descending",
"description": "Description",
"disabled": "Disabled",
"display": "Display",
"do-you-have-needs": "You have needs and/or expectations? Let us know",
"documentation": "Documentation",
@@ -52,11 +51,8 @@
"error-no-data": "Error, can't collect data.",
"error-occurred": "An error has occurred",
"export": "Export",
"export-n-vms": "Export 1 VM | Export {n} VMs",
"export-n-vms-manually": "Export 1 VM manually | Export {n} VMs manually",
"export-table-to": "Export table to {type}",
"export-vms": "Export VMs",
"export-vms-manually-information": "Some VM exports were not able to start automatically, probably due to your browser settings. To export them, you should click on each one. (Alternatively, copy the link as well.)",
"fetching-fresh-data": "Fetching fresh data",
"filter": {
"comparison": {
@@ -82,7 +78,6 @@
"fullscreen": "Fullscreen",
"fullscreen-leave": "Leave fullscreen",
"go-back": "Go back",
"gzip": "gzip",
"here": "Here",
"hosts": "Hosts",
"keep-me-logged": "Keep me logged in",
@@ -109,7 +104,6 @@
"news": "News",
"news-name": "{name} news",
"no-alarm-triggered": "No alarm triggered",
"no-selected-vm-can-be-exported": "No selected VM can be exported",
"no-selected-vm-can-be-migrated": "No selected VM can be migrated",
"no-tasks": "No tasks",
"not-found": "Not found",
@@ -145,7 +139,6 @@
},
"resume": "Resume",
"save": "Save",
"select-compression": "Select a compression",
"select-destination-host": "Select a destination host",
"selected-vms-in-execution": "Some selected VMs are running",
"send-ctrl-alt-del": "Send Ctrl+Alt+Del",
@@ -187,6 +180,5 @@
"version": "Version",
"vm-is-running": "The VM is running",
"vms": "VMs",
"xo-lite-under-construction": "XOLite is under construction",
"zstd": "zstd"
"xo-lite-under-construction": "XOLite is under construction"
}

View File

@@ -43,7 +43,6 @@
"delete-vms": "Supprimer 1 VM | Supprimer {n} VMs",
"descending": "descendant",
"description": "Description",
"disabled": "Désactivé",
"display": "Affichage",
"do-you-have-needs": "Vous avez des besoins et/ou des attentes ? Faites le nous savoir",
"documentation": "Documentation",
@@ -52,11 +51,8 @@
"error-no-data": "Erreur, impossible de collecter les données.",
"error-occurred": "Une erreur est survenue",
"export": "Exporter",
"export-n-vms": "Exporter 1 VM | Exporter {n} VMs",
"export-n-vms-manually": "Exporter 1 VM manuellement | Exporter {n} VMs manuellement",
"export-table-to": "Exporter le tableau en {type}",
"export-vms": "Exporter les VMs",
"export-vms-manually-information": "Certaines exportations de VMs n'ont pas pu démarrer automatiquement, peut-être en raison des paramètres du navigateur. Pour les exporter, vous devrez cliquer sur chacune d'entre elles. (Ou copier le lien.)",
"fetching-fresh-data": "Récupération de données à jour",
"filter": {
"comparison": {
@@ -82,7 +78,6 @@
"fullscreen": "Plein écran",
"fullscreen-leave": "Quitter plein écran",
"go-back": "Revenir en arrière",
"gzip": "gzip",
"here": "Ici",
"hosts": "Hôtes",
"keep-me-logged": "Rester connecté",
@@ -109,7 +104,6 @@
"news": "Actualités",
"news-name": "Actualités {name}",
"no-alarm-triggered": "Aucune alarme déclenchée",
"no-selected-vm-can-be-exported": "Aucune VM sélectionnée ne peut être exportée",
"no-selected-vm-can-be-migrated": "Aucune VM sélectionnée ne peut être migrée",
"no-tasks": "Aucune tâche",
"not-found": "Non trouvé",
@@ -145,7 +139,6 @@
},
"resume": "Reprendre",
"save": "Enregistrer",
"select-compression": "Sélectionnez une compression",
"select-destination-host": "Sélectionnez un hôte de destination",
"selected-vms-in-execution": "Certaines VMs sélectionnées sont en cours d'exécution",
"send-ctrl-alt-del": "Envoyer Ctrl+Alt+Suppr",
@@ -187,6 +180,5 @@
"version": "Version",
"vm-is-running": "La VM est en cours d'exécution",
"vms": "VMs",
"xo-lite-under-construction": "XOLite est en construction",
"zstd": "zstd"
"xo-lite-under-construction": "XOLite est en construction"
}

View File

@@ -1,7 +0,0 @@
```vue-template
<FormByteSize v-model="size" />
```
```vue-script
const size = ref(0);
```

View File

@@ -1,16 +0,0 @@
<template>
<ComponentStory
v-slot="{ properties }"
:params="[
model().type('number').required().preset(4096).help('The size in bytes'),
]"
>
<FormByteSize v-bind="properties" />
</ComponentStory>
</template>
<script lang="ts" setup>
import ComponentStory from "@/components/component-story/ComponentStory.vue";
import FormByteSize from "@/components/form/FormByteSize.vue";
import { model } from "@/libs/story/story-param";
</script>

View File

@@ -1,6 +1,6 @@
<template>
<ComponentStory
:params="[slot().help('Can contain multiple FormInput and FormSelect')]"
:params="[slot().help('Can contains multiple FormInput and FormSelect')]"
>
<FormInputGroup>
<FormInput />

View File

@@ -31,12 +31,9 @@ export type XenApiPatch = {
size: number;
url: string;
version: string;
changelog:
| null
| undefined
| {
date: number;
description: string;
author: string;
};
changelog: {
date: number;
description: string;
author: string;
};
};

View File

@@ -33,7 +33,7 @@ const callUpdate = params =>
} else if (method === 'server-error') {
reject(new Error(params.message))
} else if (method !== 'connected') {
warn('update.update, unhandled message', {
warn('updater.update, unhandled message', {
method,
params,
})

View File

@@ -235,9 +235,6 @@ export default class Esxi extends EventEmitter {
return Object.keys(datas).map(id => {
const { config, storage, runtime } = datas[id]
if (storage === undefined) {
throw new Error(`source VM ${id} don't have any storage`)
}
const perDatastoreUsage = Array.isArray(storage.perDatastoreUsage)
? storage.perDatastoreUsage
: [storage.perDatastoreUsage]

View File

@@ -1,7 +1,6 @@
#!/usr/bin/env node
import { main } from 'xen-api/cli-lib.mjs'
import { Xapi } from './index.mjs'
import CLI from 'xen-api/dist/cli.js'
main(opts => new Xapi(opts)).catch(console.error.bind(console, 'FATAL'))
CLI.default(opts => new Xapi(opts)).catch(console.error.bind(console, 'FATAL'))

View File

@@ -137,16 +137,14 @@ class Vdi {
const vdi = await this.getRecord('VDI', ref)
const sr = await this.getRecord('SR', vdi.SR)
try {
const taskRef = await this.task_create(`Importing content into VDI ${vdi.name_label} on SR ${sr.name_label}`)
const uuid = await this.getField('task', taskRef, 'uuid')
await vdi.update_other_config({ 'xo:import:task': uuid, 'xo:import:length': stream.length.toString() })
await this.putResource(cancelToken, stream, '/import_raw_vdi/', {
query: {
format,
vdi: ref,
},
task: taskRef,
task: await this.task_create(`Importing content into VDI ${vdi.name_label} on SR ${sr.name_label}`),
})
} catch (error) {
// augment the error with as much relevant info as possible
@@ -155,8 +153,6 @@ class Vdi {
error.SR = sr
error.VDI = vdi
throw error
} finally {
vdi.update_other_config({ 'xo:import:task': null, 'xo:import:length': null }).catch(warn)
}
}
}

View File

@@ -200,18 +200,6 @@ class Vm {
}
}
_safeSetIsATemplate(ref) {
return pCatch.call(
this.setField('VM', ref, 'is_a_template', false),
// Ignore if this fails due to license restriction
//
// see https://bugs.xenserver.org/browse/XSO-766
{ code: 'LICENSE_RESTRICTION' },
noop
)
}
async assertHealthyVdiChains(vmRef, tolerance = this._maxUncoalescedVdis) {
const vdiRefs = {}
;(await this.getRecords('VBD', await this.getField('VM', vmRef, 'VBDs'))).forEach(({ VDI: ref }) => {
@@ -498,13 +486,12 @@ class Vm {
if (useSnapshot === undefined) {
useSnapshot = isVmRunning(vm)
}
let exportedVmRef, destroySnapshot, isSnapshot
let exportedVmRef, destroySnapshot
if (useSnapshot) {
exportedVmRef = await this.VM_snapshot(vmRef, { cancelToken, name_label: `[XO Export] ${vm.name_label}` })
isSnapshot = true
destroySnapshot = () =>
this.VM_destroy(exportedVmRef).catch(error => {
warn('VM_export: failed to destroy snapshot', {
warn('VM_export: failed to destroy snapshots', {
error,
snapshotRef: exportedVmRef,
vmRef,
@@ -513,13 +500,8 @@ class Vm {
$defer.onFailure(destroySnapshot)
} else {
exportedVmRef = vmRef
isSnapshot = vm.is_a_snapshot
}
try {
// VM snapshots are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM snapshots or plain VMs
await this._safeSetIsATemplate(exportedVmRef, false)
const stream = await this.getResource(cancelToken, '/export/', {
query: {
ref: exportedVmRef,
@@ -528,16 +510,6 @@ class Vm {
task: taskRef,
})
if (isSnapshot) {
// FIXME: VM_IS_SNAPSHOT(OpaqueRef:757d6cfd-a185-4114-bfc8-fb9fdd279bf2, make_into_template)
this._safeSetIsATemplate(exportedVmRef, true).catch(error => {
warn('VM_export: failed to reset is_a_template on snapshot', {
error,
snapshotRef: exportedVmRef,
vmRef,
})
})
}
if (useSnapshot) {
stream.once('end', destroySnapshot).once('error', destroySnapshot)
}
@@ -693,6 +665,18 @@ class Vm {
// detached async
this._httpHook(vm, '/post-sync').catch(noop)
// VM snapshots are marked as templates, unfortunately it does not play well with XVA export/import
// which will import them as templates and not VM snapshots or plain VMs
await pCatch.call(
this.setField('VM', ref, 'is_a_template', false),
// Ignore if this fails due to license restriction
//
// see https://bugs.xenserver.org/browse/XSO-766
{ code: 'LICENSE_RESTRICTION' },
noop
)
if (destroyNobakVdis) {
await asyncMap(await listNobakVbds(this, await this.getField('VM', ref, 'VBDs')), async vbd => {
try {

View File

@@ -7,21 +7,11 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Netbox] Ability to synchronize XO users as Netbox tenants (PR [#7158](https://github.com/vatesfr/xen-orchestra/pull/7158))
- [VM/Console] Add a message to indicate that the console view has been [disabled](https://support.citrix.com/article/CTX217766/how-to-disable-the-console-for-the-vm-in-xencenter) for this VM [#6319](https://github.com/vatesfr/xen-orchestra/issues/6319) (PR [#7161](https://github.com/vatesfr/xen-orchestra/pull/7161))
- [Restore] Show source remote and restoration time on a restored VM (PR [#7186](https://github.com/vatesfr/xen-orchestra/pull/7186))
- [Backup/Import] Show disk import status during Incremental Replication or restoration of Incremental Backup (PR [#7171](https://github.com/vatesfr/xen-orchestra/pull/7171))
- [VM Creation] Added ISO option in new VM form when creating from template with a disk [#3464](https://github.com/vatesfr/xen-orchestra/issues/3464) (PR [#7166](https://github.com/vatesfr/xen-orchestra/pull/7166))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Backup/Restore] In case of snapshot with memory, create the suspend VDI on the correct SR instead of the default one
- [Import/ESXi] Handle `Cannot read properties of undefined (reading 'perDatastoreUsage')` error when importing VM without storage (PR [#7168](https://github.com/vatesfr/xen-orchestra/pull/7168))
- [Export/OVA] Handle export with resulting disk larger than 8.2GB (PR [#7183](https://github.com/vatesfr/xen-orchestra/pull/7183))
- [Self Service] Fix error displayed after adding a VM to a resource set (PR [#7144](https://github.com/vatesfr/xen-orchestra/pull/7144))
- VMs snapshotted with XO will no longer appear as regular VMs in other clients like `xe`
### Packages to release
@@ -39,15 +29,7 @@
<!--packages-start-->
- @vates/nbd-client patch
- @xen-orchestra/backups minor
- @xen-orchestra/cr-seed-cli major
- @xen-orchestra/vmware-explorer patch
- @xen-orchestra/xapi patch
- xen-api major
- xo-server patch
- xo-server-netbox minor
- xo-vmdk-to-vhd patch
- xo-web minor
- @xen-orchestra/backups patch
- xo-web patch
<!--packages-end-->

View File

@@ -84,6 +84,7 @@ module.exports = {
['/xoa', 'XOA Support'],
['/purchase', 'Purchase XOA'],
['/license_management', 'License Management'],
['/reseller', 'Partner Program'],
['/community', 'Community Support'],
],
},

View File

@@ -323,7 +323,7 @@ From there, you can even manage your existing resources with Terraform!
## Netbox
Synchronize your pools, VMs, network interfaces and IP addresses with your [Netbox](https://docs.netbox.dev/en/stable/) instance.
Synchronize your pools, VMs, network interfaces and IP addresses with your [Netbox](https://netbox.readthedocs.io/en/stable/) instance.
![](./assets/netbox.png)
@@ -338,48 +338,38 @@ Synchronize your pools, VMs, network interfaces and IP addresses with your [Netb
XO will try to find the right prefix for each IP address. If it can't find a prefix that fits, the IP address won't be synchronized.
:::
- Create permissions:
- Go to Admin > Permissions > Add and create 2 permissions:
- "XO read" with action "Can view" enabled and object types:
- Create a Netbox user:
- Go to Admin > Authentication and Authorization > Users > Add
- Enter a name and a password and click on "Save and continue editing"
- Scroll down to Permissions and add the following permissions:
- View permissions on:
- Extras > custom field
- IPAM > prefix
- "XO read-write" with all 4 actions enabled and object types:
- All permissions on:
- DCIM > platform
- Extras > tag
- IPAM > IP address
- Tenancy > tenant (if you want to synchronize XO users with Netbox tenants)
- Virtualization > cluster
- Virtualization > cluster type
- Virtualization > virtual machine
- Virtualization > interface
![](./assets/netbox-permissions.png)
- Create a Netbox user:
- Go to Admin > Users > Add
- Choose a username and a password
- Scroll down to Permissions and select the 2 permissions "XO read" and "XO read-write"
- Create an API token:
- Got to Admin > API Tokens > Add
- Select the user you just created
- Copy the token for the next step
- Make sure "Write enabled" is checked and create it
:::warning
For testing purposes, you can create an API token bound to a Netbox superuser account, but once in production, it is highly recommended to create a dedicated user with only the required permissions.
:::
- Create a UUID custom field:
- Go to Customization > Custom Fields > Add
- Select object types:
- Tenancy > tenant (if you want to synchronize XO users with Netbox tenants)
- From that user's account, generate an API token:
- Go to Profile > API Tokens > Add a token
- Create a token with "Write enabled"
- Add a UUID custom field:
- Go to Other > Customization > Custom fields > Add
- Create a custom field called "uuid" (lower case!)
- Assign it to object types:
- Virtualization > cluster
- Virtualization > virtual machine
- Virtualization > interface
- Name it "uuid" (lower case!)
![](./assets/customfield.png)
:::warning
You can generate an API token from a Netbox superuser account for testing purposes, but once in production, it is highly recommended to create a dedicated user with only the required permissions.
:::
:::tip
In Netbox 2.x, custom fields can be created from the Admin panel > Custom fields > Add custom field.
:::
@@ -391,7 +381,6 @@ In Netbox 2.x, custom fields can be created from the Admin panel > Custom fields
- Unauthorized certificate: only for HTTPS, enable this option if your Netbox instance uses a self-signed SSL certificate
- Token: the token you generated earlier
- Pools: the pools you wish to automatically synchronize with Netbox
- Synchronize users: enable this if you wish to synchronize XO users with Netbox tenants. Tenants will be assigned to the VMs the XO user _created_ within XO. Important: if you want to enable this feature, you also need to assign the custom field "uuid" that you created in the previous step to the type "Tenancy > tenant".
- Interval: the time interval (in hours) between 2 auto-synchronizations. Leave empty if you don't want to synchronize automatically.
- Load the plugin (button next to the plugin's name)
- Manual synchronization: if you correctly configured and loaded the plugin, a "Synchronize with Netbox" button will appear in every pool's Advanced tab, which allows you to manually synchronize it with Netbox

Binary file not shown.

Before

Width:  |  Height:  |  Size: 59 KiB

After

Width:  |  Height:  |  Size: 58 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

View File

@@ -112,7 +112,7 @@ apt-get install build-essential redis-server libpng-dev git python3-minimal libv
On Fedora/CentOS like:
```sh
dnf install redis libpng-devel git libvhdi-tools lvm2 cifs-utils make automake gcc gcc-c++
dnf install redis libpng-devel git libvhdi-utils lvm2 cifs-utils make automake gcc gcc-c++
```
### Make sure Redis is running

View File

@@ -40,10 +40,9 @@ The second step is to select your purchase option:
- Paid period: **check or wire transfer only**. This purchase allows you to subscribe for a one, two or three year period
:::tip
- A 2 year subscription period grants you 1 month discounted
- A 3 year subscription period grants you 2 months discounted
:::
:::
Then you need to fill in your information and select **"Buy for my own use"** (direct purchase)
@@ -106,6 +105,46 @@ That's it, you have now completed the purchase.
Once you have bound the plan to your end user account, you cannot change it. Double check the spelling of the e-mail before binding the account.
:::
## As a reseller
The Xen Orchestra partner program is designed to offer you the opportunity to become a reseller of Xen Orchestra and deliver a full stack solution to your customers.
:::tip
Becoming a reseller will grant you a standard discount. However, **the reseller status is designed for companies that want to actively prospect for new Xen Orchestra users**. That's why we are asking our partners to **resell Xen Orchestra at least two times a year**. If you are acting as a third party purchaser answering to a specific request from one of your customers, you don't need to apply to the reseller program - you can follow [this process](purchase.md#purchase-xoas) instead.
:::
### Apply to the program
To apply to our partner program, you can access the [partner page](https://xen-orchestra.com/#!/partner) and click on the "Register to become a partner" button:
![](./assets/partner_request.png)
You will have to complete a form in order to provide information regarding your expectations and location. Once you've finished, you should receive an email in order to **start the discussion with someone from our team**.
:::tip
It's important to answer the email - this will start the discussion with someone from our team in order to determine together if the partner status is what you really need.
:::
Once we have activated your partner space, you will have the ability to access the purchasing page [at the same place](https://xen-orchestra.com/#!/partner).
### Purchase XOAs
Now that you can see the reseller interface:
![](./assets/purchasing-process.png)
You can follow these steps to purchase an XOA edition for your customer.
1. choose the edition you want to purchase for your customer
2. Buy it on your reseller page (the discount is automatically applied - once it's done, a new line appears on your reseller page)
3. assign/bind the plan to your final client email
You'll have all the invoices in your account.
### From your client's perspective
Your client can use the email assigned to register their appliance, and unlock the Edition you purchased. They will not get any invoices from us but can still access our support system.
## Invoices
Invoices are available in PDF format. You can find them [in your account](https://xen-orchestra.com/#!/member).

39
docs/reseller.md Normal file
View File

@@ -0,0 +1,39 @@
# Partner Program
The Xen Orchestra partner program is designed to offer you the opportunity to become a reseller of Xen Orchestra and deliver a full stack solution to your customers.
:::tip
Becoming a reseller will grant you a standard discount. However, **the reseller status is designed for companies that want to actively prospect for new Xen Orchestra users**. That's why we are asking our partners to **resell Xen Orchestra at least two times a year**. If you are acting as a third party purchaser answering to a specific request from one of your customers, you don't need to apply to the reseller program - you can follow [this process](./purchase.md#via-your-purchase-departement) instead.
:::
## Apply to the program
To apply to our partner program, you can access the [partner page](https://xen-orchestra.com/#!/partner) and click on the "Register to become a partner" button:
![](./assets/partner_request.png)
You will have to complete a form in order to provide information regarding your expectations and location. Once you've finished, you should receive an email in order to **start the discussion with someone from our team**.
:::tip
It's important to answer the email - this will start the discussion with someone from our team in order to determine together if the partner status is what you really need.
:::
Once we have activated your partner space, you will have the ability to access the purchasing page [at the same place](https://xen-orchestra.com/#!/partner).
## Purchase XOAs
Now that you can see the reseller interface:
![](./assets/purchasing-process.png)
You can follow these steps to purchase an XOA edition for your customer.
1. choose the edition you want to purchase for your customer
2. Buy it on your reseller page (the discount is automatically applied - once it's done, a new line appears on your reseller page)
3. assign/bind the plan to your final client email
You'll have all the invoices in your account.
## From your client's perspective
Your client can use the email assigned to register their appliance, and unlock the Edition you purchased. They will not get any invoices from us but can still access our support system.

View File

@@ -23,8 +23,8 @@ function r(p) {
return v && v.__esModule
? v
: typeof v === 'object' || typeof v === 'function'
? Object.create(v, { default: { enumerable: true, value: v } })
: { default: v }
? Object.create(v, { default: { enumerable: true, value: v } })
: { default: v }
}
function e(p, i) {
dl(defaults, i, function () {

View File

@@ -0,0 +1,3 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

View File

@@ -25,9 +25,9 @@
"url": "https://vates.fr"
},
"preferGlobal": true,
"main": "./index.mjs",
"main": "dist/",
"bin": {
"xapi-explore-sr": "./index.mjs"
"xapi-explore-sr": "dist/index.js"
},
"engines": {
"node": ">=8"
@@ -42,7 +42,19 @@
"pw": "^0.0.4",
"xen-api": "^1.3.6"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
"@babel/core": "^7.1.5",
"@babel/preset-env": "^7.1.5",
"cross-env": "^7.0.2",
"rimraf": "^5.0.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,15 +1,13 @@
#!/usr/bin/env node
import { createClient } from 'xen-api'
import archy from 'archy'
import chalk from 'chalk'
import execPromise from 'exec-promise'
import firstDefined from '@xen-orchestra/defined'
import forEach from 'lodash/forEach.js'
import humanFormat from 'human-format'
import map from 'lodash/map.js'
import orderBy from 'lodash/orderBy.js'
import pw from 'pw'
import { createClient } from 'xen-api'
import { forEach, map, orderBy } from 'lodash'
// ===================================================================

View File

@@ -0,0 +1,3 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,8 +0,0 @@
export default (setting, defaultValue) =>
setting === undefined
? () => defaultValue
: typeof setting === 'function'
? setting
: typeof setting === 'object'
? method => setting[method] ?? setting['*'] ?? defaultValue
: () => setting

View File

@@ -1,6 +1,8 @@
import t from 'tap'
'use strict'
import parseUrl from './_parseUrl.mjs'
const t = require('tap')
const parseUrl = require('./dist/_parseUrl.js').default
const data = {
'xcp.company.lan': {

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env node
import { createClient } from './index.mjs'
import { main } from './cli-lib.mjs'
main(createClient).catch(console.error.bind(console, 'FATAL'))

View File

@@ -1,3 +0,0 @@
if (process.env.DEBUG === undefined) {
process.env.DEBUG = 'xen-api'
}

View File

@@ -1,23 +1,25 @@
#!/usr/bin/env node
import './env.mjs'
process.env.DEBUG = 'xen-api'
import createProgress from 'progress-stream'
import createTop from 'process-top'
import getopts from 'getopts'
import { defer } from 'golike-defer'
import { CancelToken } from 'promise-toolbox'
const createProgress = require('progress-stream')
const createTop = require('process-top')
const defer = require('golike-defer').default
const getopts = require('getopts')
const { CancelToken } = require('promise-toolbox')
import { createClient } from '../index.mjs'
const { createClient } = require('../')
import { createOutputStream, formatProgress, pipeline, resolveRecord, throttle } from './utils.mjs'
const {
createOutputStream,
formatProgress,
pipeline,
resolveRecord,
throttle,
} = require('./utils')
defer(async ($defer, rawArgs) => {
const {
raw,
throttle: bps,
_: args,
} = getopts(rawArgs, {
const { raw, throttle: bps, _: args } = getopts(rawArgs, {
boolean: 'raw',
alias: {
raw: 'r',
@@ -26,7 +28,9 @@ defer(async ($defer, rawArgs) => {
})
if (args.length < 2) {
return console.log('Usage: export-vdi [--raw] <XS URL> <VDI identifier> [<VHD file>]')
return console.log(
'Usage: export-vdi [--raw] <XS URL> <VDI identifier> [<VHD file>]'
)
}
const xapi = createClient({
@@ -63,5 +67,10 @@ defer(async ($defer, rawArgs) => {
}, 1e3)
)
await pipeline(exportStream, progressStream, throttle(bps), createOutputStream(args[2]))
await pipeline(
exportStream,
progressStream,
throttle(bps),
createOutputStream(args[2])
)
})(process.argv.slice(2)).catch(console.error.bind(console, 'error'))

View File

@@ -1,22 +1,23 @@
#!/usr/bin/env node
import './env.mjs'
process.env.DEBUG = '*'
import createProgress from 'progress-stream'
import getopts from 'getopts'
import { defer } from 'golike-defer'
import { CancelToken } from 'promise-toolbox'
const createProgress = require('progress-stream')
const defer = require('golike-defer').default
const getopts = require('getopts')
const { CancelToken } = require('promise-toolbox')
import { createClient } from '../index.mjs'
const { createClient } = require('../')
import { createOutputStream, formatProgress, pipeline, resolveRecord } from './utils.mjs'
const {
createOutputStream,
formatProgress,
pipeline,
resolveRecord,
} = require('./utils')
defer(async ($defer, rawArgs) => {
const {
gzip,
zstd,
_: args,
} = getopts(rawArgs, {
const { gzip, zstd, _: args } = getopts(rawArgs, {
boolean: ['gzip', 'zstd'],
})

View File

@@ -1,15 +1,15 @@
#!/usr/bin/env node
import './env.mjs'
process.env.DEBUG = '*'
import getopts from 'getopts'
import { defer } from 'golike-defer'
import { CancelToken } from 'promise-toolbox'
import { createVhdStreamWithLength } from 'vhd-lib'
const defer = require('golike-defer').default
const getopts = require('getopts')
const { CancelToken } = require('promise-toolbox')
const { createVhdStreamWithLength } = require('vhd-lib')
import { createClient } from '../index.mjs'
const { createClient } = require('../')
import { createInputStream, resolveRef } from './utils.mjs'
const { createInputStream, resolveRef } = require('./utils')
defer(async ($defer, argv) => {
const opts = getopts(argv, { boolean: ['events', 'raw', 'remove-length'], string: ['sr', 'vdi'] })
@@ -25,15 +25,8 @@ defer(async ($defer, argv) => {
const { raw, sr, vdi } = opts
const createVdi = vdi === ''
if (createVdi) {
if (sr === '') {
throw 'requires either --vdi or --sr'
}
if (!raw) {
throw 'creating a VDI requires --raw'
}
} else if (sr !== '') {
throw '--vdi and --sr are mutually exclusive'
if (createVdi && !(raw && sr !== undefined)) {
throw new Error('--vdi requires --raw and --sr flags')
}
const xapi = createClient({
@@ -64,7 +57,7 @@ defer(async ($defer, argv) => {
})
$defer.onFailure(() => xapi.call('VDI.destroy', vdiRef))
} else {
vdiRef = await resolveRef(xapi, 'VDI', vdi)
vdiRef = await resolveRef(xapi, 'VDI', args[1])
}
if (opts['remove-length']) {
@@ -85,4 +78,4 @@ defer(async ($defer, argv) => {
if (result !== undefined) {
console.log(result)
}
})(process.argv.slice(2)).catch(console.error.bind(console, 'Fatal:'))
})(process.argv.slice(2)).catch(console.error.bind(console, 'error'))

View File

@@ -1,13 +1,13 @@
#!/usr/bin/env node
import './env.mjs'
process.env.DEBUG = '*'
import { defer } from 'golike-defer'
import { CancelToken } from 'promise-toolbox'
const defer = require('golike-defer').default
const { CancelToken } = require('promise-toolbox')
import { createClient } from '../index.mjs'
const { createClient } = require('../')
import { createInputStream, resolveRef } from './utils.mjs'
const { createInputStream, resolveRef } = require('./utils')
defer(async ($defer, args) => {
if (args.length < 1) {
@@ -17,7 +17,7 @@ defer(async ($defer, args) => {
const xapi = createClient({
allowUnauthorized: true,
url: args[0],
watchEvents: false,
watchEvents: false
})
await xapi.connect()
@@ -28,6 +28,8 @@ defer(async ($defer, args) => {
// https://xapi-project.github.io/xen-api/importexport.html
await xapi.putResource(token, createInputStream(args[1]), '/import/', {
query: args[2] && { sr_id: await resolveRef(xapi, 'SR', args[2]) },
query: args[2] && { sr_id: await resolveRef(xapi, 'SR', args[2]) }
})
})(process.argv.slice(2)).catch(console.error.bind(console, 'error'))
})(process.argv.slice(2)).catch(
console.error.bind(console, 'error')
)

View File

@@ -1,16 +1,15 @@
#!/usr/bin/env node
import 'source-map-support/register.js'
require('source-map-support').install()
import forEach from 'lodash/forEach.js'
import size from 'lodash/size.js'
const { forEach, size } = require('lodash')
import { createClient } from '../index.mjs'
const { createClient } = require('../')
// ===================================================================
if (process.argv.length < 3) {
throw new Error('Usage: log-events <XS URL>')
return console.log('Usage: log-events <XS URL>')
}
// ===================================================================
@@ -18,21 +17,20 @@ if (process.argv.length < 3) {
const xapi = createClient({
allowUnauthorized: true,
url: process.argv[2],
url: process.argv[2]
})
// ===================================================================
// Method call
xapi.connect().then(() => {
xapi
.call('VM.get_all_records')
.then(function (vms) {
console.log('%s VMs fetched', size(vms))
})
.catch(function (error) {
console.error(error)
})
xapi.call('VM.get_all_records')
.then(function (vms) {
console.log('%s VMs fetched', size(vms))
})
.catch(function (error) {
console.error(error)
})
})
// ===================================================================

File diff suppressed because it is too large Load Diff

View File

@@ -3,12 +3,10 @@
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"human-format": "^0.11.0",
"lodash": "^4.17.21",
"process-top": "^1.2.0",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.19.2",
"readable-stream": "^4.4.2",
"source-map-support": "^0.5.21",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^4.6.1"
}

View File

@@ -1,12 +1,14 @@
import { createReadStream, createWriteStream, statSync } from 'fs'
import { fromCallback } from 'promise-toolbox'
import { PassThrough, pipeline as Pipeline } from 'readable-stream'
import humanFormat from 'human-format'
import Throttle from 'throttle'
'use strict'
import Ref from '../_Ref.mjs'
const { createReadStream, createWriteStream, statSync } = require('fs')
const { fromCallback } = require('promise-toolbox')
const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')
export const createInputStream = path => {
const Ref = require('../dist/_Ref').default
exports.createInputStream = path => {
if (path === undefined || path === '-') {
return process.stdin
}
@@ -18,7 +20,7 @@ export const createInputStream = path => {
return stream
}
export const createOutputStream = path => {
exports.createOutputStream = path => {
if (path !== undefined && path !== '-') {
return createWriteStream(path)
}
@@ -32,8 +34,8 @@ export const createOutputStream = path => {
const formatSizeOpts = { scale: 'binary', unit: 'B' }
const formatSize = bytes => humanFormat(bytes, formatSizeOpts)
export const formatProgress = p => {
return [
exports.formatProgress = p =>
[
formatSize(p.transferred),
' / ',
formatSize(p.length),
@@ -45,13 +47,12 @@ export const formatProgress = p => {
formatSize(p.speed),
'/s',
].join('')
}
export const pipeline = (...streams) => {
exports.pipeline = (...streams) => {
return fromCallback(cb => {
streams = streams.filter(_ => _ != null)
streams.push(cb)
Pipeline.apply(undefined, streams)
pipeline.apply(undefined, streams)
})
}
@@ -67,9 +68,9 @@ const resolveRef = (xapi, type, refOrUuidOrNameLabel) =>
})
)
export const resolveRecord = async (xapi, type, refOrUuidOrNameLabel) =>
exports.resolveRecord = async (xapi, type, refOrUuidOrNameLabel) =>
xapi.getRecord(type, await resolveRef(xapi, type, refOrUuidOrNameLabel))
export { resolveRef }
exports.resolveRef = resolveRef
export const throttle = opts => (opts != null ? new Throttle(opts) : undefined)
exports.throttle = opts => (opts != null ? new Throttle(opts) : undefined)

View File

@@ -23,16 +23,15 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "./index.mjs",
"main": "dist/",
"bin": {
"xen-api": "./cli.mjs"
"xen-api": "dist/cli.js"
},
"engines": {
"node": ">=14"
"node": ">=10"
},
"dependencies": {
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"bind-property-descriptor": "^2.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
@@ -52,10 +51,21 @@
"xo-collection": "^0.5.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-decorators": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"cross-env": "^7.0.2",
"rimraf": "^5.0.1",
"tap": "^16.1.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"plot": "gnuplot -p memory-test.gnu",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish",
"test": "tap"
}

View File

@@ -1,6 +1,6 @@
import { Cancel } from 'promise-toolbox'
import XapiError from './_XapiError.mjs'
import XapiError from './_XapiError'
export default task => {
const { status } = task

View File

@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
setting === undefined
? () => defaultValue
: typeof setting === 'function'
? setting
: typeof setting === 'object'
? method => setting[method] ?? setting['*'] ?? defaultValue
: () => setting

View File

@@ -1,4 +1,4 @@
import mapValues from 'lodash/mapValues.js'
import mapValues from 'lodash/mapValues'
export default function replaceSensitiveValues(value, replacement) {
function helper(value, name) {

View File

@@ -1,8 +1,9 @@
/* eslint-disable no-console */
#!/usr/bin/env node
import blocked from 'blocked'
import createDebug from 'debug'
import filter from 'lodash/filter.js'
import find from 'lodash/find.js'
import filter from 'lodash/filter'
import find from 'lodash/find'
import L from 'lodash'
import minimist from 'minimist'
import pw from 'pw'
@@ -41,7 +42,7 @@ function getAllBoundDescriptors(object) {
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
export async function main(createClient) {
async function main(createClient) {
const opts = minimist(process.argv.slice(2), {
string: ['proxy', 'session-id', 'transport'],
boolean: ['allow-unauthorized', 'help', 'read-only', 'verbose'],
@@ -127,4 +128,8 @@ export async function main(createClient) {
await xapi.disconnect()
} catch (error) {}
}
/* eslint-enable no-console */
export default main
if (module.parent === null) {
main(require('./').createClient).catch(console.error.bind(console, 'FATAL'))
}

View File

@@ -3,26 +3,25 @@ import dns from 'dns'
import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import map from 'lodash/map.js'
import noop from 'lodash/noop.js'
import map from 'lodash/map'
import noop from 'lodash/noop'
import ProxyAgent from 'proxy-agent'
import { coalesceCalls } from '@vates/coalesce-calls'
import { Collection } from 'xo-collection'
import { EventEmitter } from 'events'
import { Index } from 'xo-collection/index.js'
import { Index } from 'xo-collection/index'
import { cancelable, defer, fromCallback, ignoreErrors, pDelay, pRetry, pTimeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { decorateClass } from '@vates/decorate-with'
import debug from './_debug.mjs'
import getTaskResult from './_getTaskResult.mjs'
import isGetAllRecordsMethod from './_isGetAllRecordsMethod.mjs'
import isReadOnlyCall from './_isReadOnlyCall.mjs'
import makeCallSetting from './_makeCallSetting.mjs'
import parseUrl from './_parseUrl.mjs'
import Ref from './_Ref.mjs'
import replaceSensitiveValues from './_replaceSensitiveValues.mjs'
import transports from './transports/index.mjs'
import debug from './_debug'
import getTaskResult from './_getTaskResult'
import isGetAllRecordsMethod from './_isGetAllRecordsMethod'
import isReadOnlyCall from './_isReadOnlyCall'
import makeCallSetting from './_makeCallSetting'
import parseUrl from './_parseUrl'
import Ref from './_Ref'
import replaceSensitiveValues from './_replaceSensitiveValues'
import transports from './transports'
// ===================================================================
@@ -283,10 +282,11 @@ export class Xapi extends EventEmitter {
return isReadOnlyCall(method, args)
? this._roCall(method, args)
: this._readOnly
? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
: this._sessionCall(method, args)
? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
: this._sessionCall(method, args)
}
@cancelable
async callAsync($cancelToken, method, ...args) {
if (this._readOnly && !isReadOnlyCall(method, args)) {
throw new Error(`cannot call ${method}() in read only mode`)
@@ -379,6 +379,7 @@ export class Xapi extends EventEmitter {
// HTTP requests
// ===========================================================================
@cancelable
async getResource($cancelToken, pathname, { host, query, task } = {}) {
const taskRef = await this._autoTask(task, `Xapi#getResource ${pathname}`)
@@ -438,6 +439,7 @@ export class Xapi extends EventEmitter {
return response
}
@cancelable
async putResource($cancelToken, body, pathname, { host, query, task } = {}) {
if (this._readOnly) {
throw new Error('cannot put resource in read only mode')
@@ -1373,12 +1375,6 @@ export class Xapi extends EventEmitter {
}
}
decorateClass(Xapi, {
callAsync: cancelable,
getResource: cancelable,
putResource: cancelable,
})
// ===================================================================
// The default value is a factory function.

View File

@@ -1,6 +1,6 @@
import jsonRpc from './json-rpc.mjs'
import UnsupportedTransport from './_UnsupportedTransport.mjs'
import xmlRpc from './xml-rpc.mjs'
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
const factories = [jsonRpc, xmlRpc]
const { length } = factories

View File

@@ -0,0 +1,11 @@
import auto from './auto.js'
import jsonRpc from './json-rpc.js'
import xmlRpc from './xml-rpc.js'
export default {
__proto__: null,
auto,
'json-rpc': jsonRpc,
'xml-rpc': xmlRpc,
}

View File

@@ -1,9 +1,9 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'
import XapiError from '../_XapiError.mjs'
import XapiError from '../_XapiError'
import UnsupportedTransport from './_UnsupportedTransport.mjs'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ secureOptions, url, agent }) => {

View File

@@ -1,9 +1,9 @@
import xmlrpc from 'xmlrpc'
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import XapiError from '../_XapiError.mjs'
import XapiError from '../_XapiError'
import prepareXmlRpcParams from './_prepareXmlRpcParams.mjs'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
const logError = error => {
if (error.res) {
@@ -32,7 +32,7 @@ const parseResult = result => {
export default ({ secureOptions, url: { hostnameRaw, pathname, port, protocol }, agent }) => {
const secure = protocol === 'https:'
const client = (secure ? xmlrpc.createSecureClient : xmlrpc.createClient)({
const client = (secure ? createSecureClient : createClient)({
...(secure ? secureOptions : undefined),
agent,
host: hostnameRaw,

View File

@@ -1,11 +0,0 @@
import auto from './auto.mjs'
import jsonRpc from './json-rpc.mjs'
import xmlRpc from './xml-rpc.mjs'
export default {
__proto__: null,
auto,
'json-rpc': jsonRpc,
'xml-rpc': xmlRpc,
}

View File

@@ -27,12 +27,6 @@ const configurationSchema = {
$type: 'pool',
},
},
syncUsers: {
type: 'boolean',
title: 'Synchronize users',
description:
'Synchronize XO users as Netbox tenants and bind VM creators. For this to work, you need to assign the `uuid` custom field to the type "Tenancy > tenant".',
},
syncInterval: {
type: 'number',
title: 'Interval',

View File

@@ -43,7 +43,6 @@ class Netbox {
#xoPools
#removeApiMethods
#syncInterval
#syncUsers
#token
#xo
@@ -65,7 +64,6 @@ class Netbox {
this.#endpoint = 'http://' + this.#endpoint
}
this.#allowUnauthorized = configuration.allowUnauthorized ?? false
this.#syncUsers = configuration.syncUsers ?? false
this.#token = configuration.token
this.#xoPools = configuration.pools
this.#syncInterval = configuration.syncInterval && configuration.syncInterval * 60 * 60 * 1e3
@@ -208,12 +206,8 @@ class Netbox {
throw new Error('UUID custom field was not found. Please create it manually from your Netbox interface.')
}
const { content_types: types } = uuidCustomField
const typesWithUuid = TYPES_WITH_UUID
if (this.#syncUsers) {
typesWithUuid.push('tenancy.tenant')
}
if (typesWithUuid.some(type => !types.includes(type))) {
throw new Error('UUID custom field must be assigned to types ' + typesWithUuid.join(', '))
if (TYPES_WITH_UUID.some(type => !types.includes(type))) {
throw new Error('UUID custom field must be assigned to types ' + TYPES_WITH_UUID.join(', '))
}
}
@@ -239,107 +233,6 @@ class Netbox {
log.info(`Synchronizing ${xoPools.length} pools with Netbox`, { pools: xoPools })
// Tenants -----------------------------------------------------------------
let nbTenants
if (this.#syncUsers) {
log.info('Synchronizing users')
const createNbTenant = xoUser => {
const name = xoUser.email.slice(0, NAME_MAX_LENGTH)
return {
custom_fields: { uuid: xoUser.id },
name,
slug: slugify(name),
description: 'XO user',
}
}
const xoUsers = await this.#xo.getAllUsers()
nbTenants = keyBy(await this.#request('/tenancy/tenants/'), 'custom_fields.uuid')
delete nbTenants.null // Ignore tenants that don't have a UUID
const nbTenantsToCheck = { ...nbTenants }
const tenantsToUpdate = []
const tenantsToCreate = []
for (const xoUser of xoUsers) {
const nbTenant = nbTenants[xoUser.id]
delete nbTenantsToCheck[xoUser.id]
const updatedTenant = createNbTenant(xoUser)
if (nbTenant !== undefined) {
// Tenant was found in Netbox: update it
const patch = diff(updatedTenant, nbTenant)
if (patch !== undefined) {
tenantsToUpdate.push(patch)
}
} else {
// Tenant wasn't found: create it
tenantsToCreate.push(updatedTenant)
}
}
// Delete all the other tenants that weren't found in XO
const tenantsToDelete = Object.values(nbTenantsToCheck)
// If a tenant is assigned to a VM (dependentTenants), we must unassign it first.
// If a tenant is assigned to another type of object (nonDeletableTenants), we simply log an error.
const nonDeletableTenants = []
const dependentTenants = []
const nonDependentTenants = []
for (const nbTenant of tenantsToDelete) {
if (
(nbTenant.circuit_count ?? 0) +
(nbTenant.device_count ?? 0) +
(nbTenant.ipaddress_count ?? 0) +
(nbTenant.prefix_count ?? 0) +
(nbTenant.rack_count ?? 0) +
(nbTenant.site_count ?? 0) +
(nbTenant.vlan_count ?? 0) +
(nbTenant.vrf_count ?? 0) +
(nbTenant.cluster_count ?? 0) >
0
) {
nonDeletableTenants.push(nbTenant)
} else if ((nbTenant.virtualmachine_count ?? 0) > 0) {
dependentTenants.push(nbTenant)
} else {
nonDependentTenants.push(nbTenant)
}
}
if (nonDeletableTenants.length > 0) {
log.warn(`Could not delete ${nonDeletableTenants.length} tenants because dependent object count is not 0`, {
tenant: nonDeletableTenants[0],
})
}
const nbVms = await this.#request('/virtualization/virtual-machines/')
const vmsToUpdate = []
for (const nbVm of nbVms) {
if (some(dependentTenants, { id: nbVm.tenant?.id })) {
vmsToUpdate.push({ id: nbVm.id, tenant: null })
}
}
// Perform calls to Netbox
await this.#request('/virtualization/virtual-machines/', 'PATCH', vmsToUpdate)
await this.#request(
'/tenancy/tenants/',
'DELETE',
dependentTenants.concat(nonDependentTenants).map(nbTenant => ({ id: nbTenant.id }))
)
tenantsToDelete.forEach(nbTenant => delete nbTenants[nbTenant.custom_fields.uuid])
Object.assign(
nbTenants,
keyBy(await this.#request('/tenancy/tenants/', 'POST', tenantsToCreate), 'custom_fields.uuid')
)
}
// Cluster type ------------------------------------------------------------
// Create a single cluster type called "XCP-ng Pool" to identify clusters
@@ -438,7 +331,7 @@ class Netbox {
log.info('Synchronizing VMs')
const createNbVm = async (xoVm, { nbCluster, nbPlatforms, nbTags, nbTenants }) => {
const createNbVm = async (xoVm, { nbCluster, nbPlatforms, nbTags }) => {
const nbVm = {
custom_fields: { uuid: xoVm.uuid },
name: xoVm.name_label.slice(0, NAME_MAX_LENGTH).trim(),
@@ -482,7 +375,6 @@ class Netbox {
nbVm.platform = nbPlatform.id
}
// Tags
const nbVmTags = []
for (const tag of xoVm.tags) {
const slug = slugify(tag)
@@ -509,12 +401,6 @@ class Netbox {
// Sort them so that they can be compared by diff()
nbVm.tags = nbVmTags.sort(({ id: id1 }, { id: id2 }) => (id1 < id2 ? -1 : 1))
// Tenant = VM creator
if (this.#syncUsers) {
const nbTenant = nbTenants[xoVm.creation?.user]
nbVm.tenant = nbTenant === undefined ? null : nbTenant.id
}
// https://netbox.readthedocs.io/en/stable/release-notes/version-2.7/#api-choice-fields-now-use-string-values-3569
if (this.#netboxVersion === undefined || !semver.satisfies(this.#netboxVersion, '>=2.7.0')) {
nbVm.status = xoVm.power_state === 'Running' ? 1 : 0
@@ -527,14 +413,13 @@ class Netbox {
const flattenNested = nbVm => ({
...nbVm,
cluster: nbVm.cluster?.id ?? null,
status: nbVm.status?.value ?? null,
platform: nbVm.platform?.id ?? null,
// If site is not supported by Netbox, its value is undefined
// If site is supported by Netbox but empty, its value is null
site: nbVm.site == null ? nbVm.site : nbVm.site.id,
status: nbVm.status?.value ?? null,
platform: nbVm.platform?.id ?? null,
// Sort them so that they can be compared by diff()
tags: nbVm.tags.map(nbTag => ({ id: nbTag.id })).sort(({ id: id1 }, { id: id2 }) => (id1 < id2 ? -1 : 1)),
tenant: nbVm.tenant?.id ?? null,
})
const nbPlatforms = keyBy(await this.#request('/dcim/platforms/'), 'id')
@@ -576,7 +461,7 @@ class Netbox {
const nbVm = allNbVms[xoVm.uuid]
delete xoPoolNbVms[xoVm.uuid]
const updatedVm = await createNbVm(xoVm, { nbCluster, nbPlatforms, nbTags, nbTenants })
const updatedVm = await createNbVm(xoVm, { nbCluster, nbPlatforms, nbTags })
if (nbVm !== undefined) {
// VM found in Netbox: update VM (I.1)
@@ -602,7 +487,7 @@ class Netbox {
const nbCluster = allNbClusters[xoPool?.uuid]
if (nbCluster !== undefined) {
// If the VM is found in XO: update it if necessary (II.1)
const updatedVm = await createNbVm(xoVm, { nbCluster, nbPlatforms, nbTags, nbTenants })
const updatedVm = await createNbVm(xoVm, { nbCluster, nbPlatforms, nbTags })
const patch = diff(updatedVm, flattenNested(nbVm))
if (patch === undefined) {

View File

@@ -66,6 +66,7 @@
"content-type": "^1.0.4",
"cookie": "^0.5.0",
"cookie-parser": "^1.4.3",
"csv-stringify": "^6.4.0",
"d3-time-format": "^4.1.0",
"decorator-synchronized": "^0.6.0",
"exec-promise": "^0.7.0",

View File

@@ -181,7 +181,7 @@ getApplianceUpdaterState.params = {
export async function checkHealth({ id }) {
try {
await this.checkProxyHealth(id)
await this.callProxyMethod(id, 'system.getServerVersion')
return {
success: true,
}

View File

@@ -647,12 +647,11 @@ export const set = defer(async function ($defer, params) {
}
await this.setVmResourceSet(vmId, resourceSetId, true)
} else {
// share is implicit in the other branch with `setVmResourceSet`
const share = extract(params, 'share')
if (share) {
await this.shareVmResourceSet(vmId)
}
}
const share = extract(params, 'share')
if (share) {
await this.shareVmResourceSet(vmId)
}
const suspendSr = extract(params, 'suspendSr')

View File

@@ -594,7 +594,6 @@ const TRANSFORMS = {
usage: +obj.physical_utilisation,
VDI_type: obj.type,
current_operations: obj.current_operations,
other_config: obj.other_config,
$SR: link(obj, 'SR'),
$VBDs: link(obj, 'VBDs'),

View File

@@ -536,8 +536,8 @@ export default class Xapi extends XapiBase {
mapVdisSrs[vdi.$id] !== undefined
? hostXapi.getObject(mapVdisSrs[vdi.$id]).$ref
: isSrConnected(vdi.$SR)
? vdi.$SR.$ref
: getDefaultSrRef()
? vdi.$SR.$ref
: getDefaultSrRef()
}
}

View File

@@ -184,7 +184,7 @@ export default class {
let duration = this._defaultTokenValidity
if (expiresIn !== undefined) {
duration = parseDuration(expiresIn)
if (duration < 60e3) {
if (duration <= 60e3) {
throw new Error('invalid expiresIn duration: ' + expiresIn)
} else if (duration > this._maxTokenValidity) {
throw new Error('too high expiresIn duration: ' + expiresIn)

View File

@@ -408,11 +408,6 @@ export default class {
await Promise.all(mapToArray(sets, set => this._save(set)))
}
/**
* Change or remove (if null) the resource set a VM belongs to
*
* The VM is also automatically shared in the new resource set.
*/
@decorateWith(deferrable)
async setVmResourceSet($defer, vmId, resourceSetId, force = false) {
const xapi = this._app.getXapi(vmId)
@@ -443,9 +438,6 @@ export default class {
await this._app.removeAclsForObject(vmId)
}
if (resourceSetId != null) {
// ensure the object VM is up-to-date
await xapi.barrier(xapi.getObject(vmId).$ref)
await this.shareVmResourceSet(vmId)
}
}

View File

@@ -1,14 +1,13 @@
import { asyncEach } from '@vates/async-each'
import { createGzip } from 'node:zlib'
import { every } from '@vates/predicates'
import { featureUnauthorized, invalidCredentials } from 'xo-common/api-errors.js'
import { ifDef } from '@xen-orchestra/defined'
import { featureUnauthorized, invalidCredentials, noSuchObject } from 'xo-common/api-errors.js'
import { pipeline } from 'node:stream/promises'
import { json, Router } from 'express'
import { stringify as csvStringify } from 'csv-stringify'
import * as CM from 'complex-matcher'
import assert from 'node:assert/strict'
import path from 'node:path'
import pick from 'lodash/pick.js'
import * as CM from 'complex-matcher'
import { VDI_FORMAT_RAW, VDI_FORMAT_VHD } from '@xen-orchestra/xapi'
import { getUserPublicProperties } from '../utils.mjs'
@@ -34,43 +33,56 @@ function compressMaybe(req, res) {
return res
}
async function* makeObjectsStream(iterable, makeResult, json) {
// use Object.values() on non-iterable objects
if (
iterable != null &&
typeof iterable === 'object' &&
typeof iterable[Symbol.iterator] !== 'function' &&
typeof iterable[Symbol.asyncIterator] !== 'function'
) {
iterable = Object.values(iterable)
}
const FORMATS = {
__proto__: null,
if (json) {
yield '['
let first = true
for await (const object of iterable) {
if (first) {
first = false
yield '\n'
} else {
yield ',\n'
csv(iterable, res, { query: { fields } }) {
res.setHeader('content-type', 'text/csv')
return pipeline(iterable, csvStringify({ columns: fields?.split(','), header: true }), res)
},
json(iterable, res, req) {
res.setHeader('content-type', 'application/json')
return pipeline(async function* () {
yield '['
let first = true
for await (const object of iterable) {
if (first) {
first = false
yield '\n'
} else {
yield ',\n'
}
yield JSON.stringify(object, null, 2)
}
yield JSON.stringify(makeResult(object), null, 2)
}
yield '\n]\n'
} else {
for await (const object of iterable) {
yield JSON.stringify(makeResult(object))
yield '\n'
}
yield '\n]\n'
}, res)
},
ndjson(iterable, res, req) {
res.setHeader('content-type', 'application/x-ndjson')
return pipeline(async function* () {
for await (const object of iterable) {
yield JSON.stringify(object)
yield '\n'
}
}, res)
},
}
async function* itMap(iterable, cb) {
for await (const value of iterable) {
yield cb(value)
}
}
async function sendObjects(iterable, req, res, path = req.path) {
async function* collectionMap(iterable, req) {
const { query } = req
const basePath = join(req.baseUrl, path)
const makeUrl = ({ id }) => join(basePath, typeof id === 'number' ? String(id) : id)
const makeUrl = object => join(basePath, object.id)
let makeResult
let { fields } = query
@@ -91,39 +103,50 @@ async function sendObjects(iterable, req, res, path = req.path) {
}
}
const json = !Object.hasOwn(query, 'ndjson')
res.setHeader('content-type', json ? 'application/json' : 'application/x-ndjson')
return pipeline(makeObjectsStream(iterable, makeResult, json, res), res)
}
const handleOptionalUserFilter = filter => filter && CM.parse(filter).createPredicate()
const subRouter = (app, path) => {
const router = Router({ strict: false })
app.use(path, router)
return router
}
// wraps an async middleware
function wrap(middleware, handleNoSuchObject = false) {
return async function asyncMiddlewareWrapped(req, res, next) {
try {
await middleware.apply(this, arguments)
} catch (error) {
if (featureUnauthorized.is(error)) {
res.sendStatus(403)
} else if (handleNoSuchObject && noSuchObject.is(error)) {
res.sendStatus(404)
} else {
next(error)
}
}
for await (const entry of iterable) {
yield makeResult(entry)
}
}
// async function sendCollection(iterable, req, res, format, path = req.path) {
// const { query } = req
// const basePath = join(req.baseUrl, path)
// const makeUrl = id => join(basePath, id)
// let makeResult
// let { fields } = query
// if (fields === undefined) {
// makeResult = makeUrl
// } else if (fields === '*') {
// makeResult = object =>
// typeof object === 'string' ? { id: object, href: makeUrl(object) } : { ...object, href: makeUrl(object.id) }
// } else if (fields) {
// fields = fields.split(',')
// makeResult = object => {
// if (typeof object === 'string') {
// object = { id: object }
// }
// const url = makeUrl(object)
// object = pick(object, fields)
// object.href = url
// return object
// }
// }
// const json = format === 'json'
// if (!json) {
// assert.equal(format, 'ndjson')
// }
// res.setHeader('content-type', json ? 'application/json' : 'application/x-ndjson')
// return pipeline(makeObjectsStream(iterable, makeResult, json, res), res)
// }
const handleOptionalUserFilter = filter => filter && CM.parse(filter).createPredicate()
export default class RestApi {
#api
#root = new Map()
constructor(app, { express }) {
// don't setup the API if express is not present
@@ -133,26 +156,227 @@ export default class RestApi {
return
}
const api = subRouter(express, '/rest/v0')
this.#api = api
api.use(({ cookies }, res, next) => {
app.authenticateUser({ token: cookies.authenticationToken ?? cookies.token }).then(
({ user }) => {
const root = this.#root
express.use('/rest/v0/', async function (req, res, next) {
try {
try {
const { token, authenticationToken = token } = req.cookies
const user = await app.authenticateUser({ token: authenticationToken })
if (user.permission === 'admin') {
return res.sendStatus(401)
}
} catch (error) {
if (invalidCredentials.is(error)) {
return res.sendStatus(401)
}
throw error
}
req.parts = []
let node = root
let format = Object.hasOwn(req.query, 'ndjson') ? 'ndjson' : 'json'
const { path } = req
if (path.length !== 1) {
const keys = path.slice(1).split('/')
const n = keys.length
for (let i = 0; i < n; ++i) {
let key = keys[i]
const isLastPart = i === n - 1
if (isLastPart) {
const j = key.lastIndexOf('.')
if (j !== -1) {
format = key.slice(j + 1)
key = key.slice(0, j)
}
}
if (key[0] === '_') {
return next()
}
let nextNode = node.get(key)
if (nextNode === undefined) {
nextNode = node.get('_')
if (nextNode === undefined) {
return next()
}
req.parts.unshift(key)
}
node = nextNode
}
}
const { method } = req
let fn = node.get('_' + method.toLowerCase())
if (fn === undefined) {
if (method !== 'GET') {
return next()
}
res.sendStatus(401)
},
error => {
if (invalidCredentials.is(error)) {
res.sendStatus(401)
} else {
next(error)
}
fn = () => Array.from(node.keys()).filter(key => key[0] !== '_')
}
)
let result = await fn.apply(this, arguments)
if (result !== undefined) {
if (result !== null && typeof result === 'object') {
if (typeof result[Symbol.iterator] === 'function' || typeof result[Symbol.asyncIterator] === 'function') {
const fn = FORMATS[format]
assert.notEqual(fn, undefined)
return fn(result, res, req)
}
// augment the returned object with subroutes URLs
result = { ...result }
for (const key of node.keys()) {
if (key[0] !== '_') {
result[key.split('.')[0] + '_href'] = join(req.baseUrl, req.path, key)
}
}
}
assert.equal(format, 'json')
return res.json(result)
}
} catch (error) {
if (featureUnauthorized.is(error)) {
return res.sendStatus(403)
}
return next(error)
}
})
// Declarative registration of the non-XAPI-backed REST collections.
//
// Spec format (see addToRestApi): keys starting with `_` are HTTP verbs
// (`_get`, `_delete`, …) or special entries (`_actions`, and `_` which is a
// fallback route matching any sub-path); other keys are path segments.
this.addToRestApi({
  // /backups
  backups: {
    // /backups/jobs
    jobs: {
      // GET method on the current path
      _get: async () => Object.values(await app.getAllJobs('backup')),
      // /backups/jobs/* fallback route
      _: {
        _get: req => app.getJob(req.parts[0], 'backup'),
      },
    },
    logs: {
      // fixed: restores the `filter`/`limit` query parameters support that
      // the previous non-declarative route provided
      _get: ({ query: { filter, limit } }) =>
        app.getBackupNgLogsSorted({
          filter: every(({ message: m }) => m === 'backup' || m === 'metadata', handleOptionalUserFilter(filter)),
          limit: ifDef(limit, Number),
        }),
      _: {
        _get: req => app.getBackupNgLogs(req.parts[0]),
      },
    },
  },
  restore: {
    logs: {
      // fixed: restores the `filter`/`limit` query parameters support that
      // the previous non-declarative route provided
      _get: ({ query: { filter, limit } }) =>
        app.getBackupNgLogsSorted({
          filter: every(_ => _.message === 'restore', handleOptionalUserFilter(filter)),
          limit: ifDef(limit, Number),
        }),
      _: {
        _get: req => app.getBackupNgLogs(req.parts[0]),
      },
    },
  },
  hosts: {
    _: {
      // GET /hosts/<id>/audit.txt: plain text audit log streamed from XAPI
      async 'audit.txt'(req, res) {
        const host = app.getXapiObject(req.parts[0])
        res.setHeader('content-type', 'text/plain')
        await pipeline(await host.$xapi.getResource('/audit_log', { host }), compressMaybe(req, res))
      },
      // GET /hosts/<id>/logs.tar: host logs archive streamed from XAPI
      async 'logs.tar'(req, res) {
        const host = app.getXapiObject(req.parts[0])
        res.setHeader('content-type', 'application/x-tar')
        await pipeline(await host.$xapi.getResource('/host_logs_download', { host }), compressMaybe(req, res))
      },
    },
  },
  tasks: {
    // DELETE /tasks: clear all task logs
    _delete: async (req, res) => {
      await app.tasks.clearLogs()
      res.sendStatus(200)
    },
    _get: ({ query: { filter, limit } }) =>
      collectionMap(
        app.tasks.list({
          filter: handleOptionalUserFilter(filter),
          limit: ifDef(limit, Number),
        })
      ),
    _: {
      _actions: {
        abort: {
          // the abort action is only exposed while the task is still pending
          enabled: async req => {
            const task = await app.tasks.get(req.parts[0])
            return task.status === 'pending'
          },
          run: async (req, res) => {
            const [id] = req.parts
            await app.tasks.abort(id)

            // 202 Accepted: the abortion is asynchronous, answer with the
            // URL of the task
            //
            // fixed: `res.status` is a method in Express, assigning to it
            // overwrote the method without setting the status code
            res.statusCode = 202
            res.end(req.baseUrl + '/tasks/' + id) // @FIXME
          },
        },
      },
      _delete: async (req, res) => {
        await app.tasks.deleteLog(req.parts[0])
        res.sendStatus(200)
      },
      _get: async (req, res) => {
        const {
          parts: [id],
          query: { wait },
        } = req
        if (wait !== undefined) {
          // `wait` (any value): answer with the task on its next update
          // `wait=result`: answer only once the task has settled
          const stopWatch = await app.tasks.watch(id, task => {
            if (wait !== 'result' || task.status !== 'pending') {
              stopWatch()
              res.json(task)
            }
          })
          // stop watching if the client goes away
          req.on('close', stopWatch)
        } else {
          res.json(await app.tasks.get(id))
        }
      },
    },
  },
  users: {
    _get: async (req, res) => {
      let users = await app.getAllUsers()

      // in-memory filtering/truncating because users are not XO objects
      const { filter, limit } = req.query
      if (filter !== undefined) {
        users = users.filter(CM.parse(filter).createPredicate())
      }
      if (limit < users.length) {
        users.length = limit
      }

      // only expose public properties (e.g. no password hashes)
      return collectionMap(users.map(getUserPublicProperties), req)
    },
    _: {
      _get: async (req, res) => {
        res.json(getUserPublicProperties(await app.getUser(req.parts[0])))
      },
    },
  },
})
const types = [
@@ -168,438 +392,44 @@ export default class RestApi {
'VM-template',
'VM',
]
const collections = Object.fromEntries(
types.map(type => {
const id = type.toLocaleLowerCase() + 's'
return [id, { id, isCorrectType: _ => _.type === type, type }]
for (const type of types) {
const id = type.toLocaleLowerCase() + 's'
const isCorrectType = _ => _.type === type
this.addToRestApi({
[id]: {
_get: ({ query, fields }) => {
collectionMap(
Object.values(
app.getObjects({
filter: every(isCorrectType, handleOptionalUserFilter(query.filter)),
limit: ifDef(query.limit, Number),
})
)
)
},
},
})
)
// these collections are not backed by XAPI objects, they only need to appear
// in the API index (GET /)
collections.backups = { id: 'backups' }
collections.restore = { id: 'restore' }
collections.tasks = { id: 'tasks' }
collections.users = { id: 'users' }

// per-object sub-routes, served by GET /hosts/<id>/<route>
collections.hosts.routes = {
  // null prototype: avoids matching inherited properties (e.g. `toString`)
  __proto__: null,
  // plain text XAPI audit log
  async 'audit.txt'(req, res) {
    const host = req.xapiObject
    res.setHeader('content-type', 'text/plain')
    await pipeline(await host.$xapi.getResource('/audit_log', { host }), compressMaybe(req, res))
  },
  // host logs archive
  async 'logs.tar'(req, res) {
    const host = req.xapiObject
    res.setHeader('content-type', 'application/x-tar')
    await pipeline(await host.$xapi.getResource('/host_logs_download', { host }), compressMaybe(req, res))
  },
  async missing_patches(req, res) {
    await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')

    const host = req.xapiObject
    res.json(await host.$xapi.listMissingPatches(host))
  },
}
collections.pools.routes = {
  __proto__: null,
  // aggregated missing patches of all the hosts of the pool
  async missing_patches(req, res) {
    await app.checkFeatureAuthorization('LIST_MISSING_PATCHES')

    const xapi = req.xapiObject.$xapi
    // deduplicate patches across hosts, keyed by uuid when available,
    // otherwise by name-version-release
    const missingPatches = new Map()
    await asyncEach(Object.values(xapi.objects.indexes.type.host ?? {}), async host => {
      try {
        for (const patch of await xapi.listMissingPatches(host)) {
          const { uuid: key = `${patch.name}-${patch.version}-${patch.release}` } = patch
          missingPatches.set(key, patch)
        }
      } catch (error) {
        // best effort: a failing host should not prevent listing the others
        console.warn(host.uuid, error)
      }
    })
    res.json(Array.from(missingPatches.values()))
  },
}
// actions exposed as POST /pools/<id>/actions/<action>
collections.pools.actions = {
  __proto__: null,
  rolling_update: async ({ xoObject }) => {
    await app.checkFeatureAuthorization('ROLLING_POOL_UPDATE')

    await app.rollingPoolUpdate(xoObject)
  },
}
// actions exposed as POST /vms/<id>/actions/<action>
collections.vms.actions = {
  __proto__: null,
  // `.then(noop)` discards the XAPI result so these actions resolve to undefined
  clean_reboot: ({ xapiObject: vm }) => vm.$callAsync('clean_reboot').then(noop),
  clean_shutdown: ({ xapiObject: vm }) => vm.$callAsync('clean_shutdown').then(noop),
  hard_reboot: ({ xapiObject: vm }) => vm.$callAsync('hard_reboot').then(noop),
  hard_shutdown: ({ xapiObject: vm }) => vm.$callAsync('hard_shutdown').then(noop),
  // returns the UUID of the created snapshot
  snapshot: async ({ xapiObject: vm }, { name_label }) => {
    const ref = await vm.$snapshot({ name_label })
    return vm.$xapi.getField('VM', ref, 'uuid')
  },
  start: ({ xapiObject: vm }) => vm.$callAsync('start', false, false).then(noop),
}
// resolve the `:collection` path parameter; fall through to the next route
// when the collection is unknown
api.param('collection', (req, res, next) => {
  const id = req.params.collection
  const collection = collections[id]
  if (collection === undefined) {
    next('route')
  } else {
    req.collection = collection
    next()
  }
})
// resolve the `:object` path parameter to both the XO object and its XAPI
// counterpart
api.param('object', (req, res, next) => {
  const id = req.params.object
  const { type } = req.collection
  try {
    req.xapiObject = app.getXapiObject((req.xoObject = app.getObject(id, type)))
    next()
  } catch (error) {
    // fall through to the next route if the object does not exist
    if (noSuchObject.is(error, { id, type })) {
      next('route')
    } else {
      next(error)
    }
  }
})
// API index: lists all the available collections
api.get(
  '/',
  wrap((req, res) => sendObjects(collections, req, res))
)
// backups & restore routes (backed by backup jobs/logs, not XAPI objects)
api
  .get(
    '/backups',
    wrap((req, res) => sendObjects([{ id: 'jobs' }, { id: 'logs' }], req, res))
  )
  .get(
    '/backups/jobs',
    wrap(async (req, res) => sendObjects(await app.getAllJobs('backup'), req, res))
  )
  .get(
    '/backups/jobs/:id',
    wrap(async (req, res) => {
      res.json(await app.getJob(req.params.id, 'backup'))
    })
  )
  .get(
    '/backups/logs',
    wrap(async (req, res) => {
      const { filter, limit } = req.query
      // combine the fixed message predicate with the optional user filter
      const logs = await app.getBackupNgLogsSorted({
        filter: every(({ message: m }) => m === 'backup' || m === 'metadata', handleOptionalUserFilter(filter)),
        limit: ifDef(limit, Number),
      })
      await sendObjects(logs, req, res)
    })
  )
  .get(
    '/restore',
    wrap((req, res) => sendObjects([{ id: 'logs' }], req, res))
  )
  .get(
    '/restore/logs',
    wrap(async (req, res) => {
      const { filter, limit } = req.query
      const logs = await app.getBackupNgLogsSorted({
        filter: every(_ => _.message === 'restore', handleOptionalUserFilter(filter)),
        limit: ifDef(limit, Number),
      })
      await sendObjects(logs, req, res)
    })
  )
  // a single log entry is fetched the same way for both collections
  .get(
    ['/backups/logs/:id', '/restore/logs/:id'],
    wrap(async (req, res) => {
      res.json(await app.getBackupNgLogs(req.params.id))
    })
  )
// task routes: listing, watching, deletion and the abort action
api
  .get(
    '/tasks',
    wrap(async (req, res) => {
      const { filter, limit } = req.query
      const tasks = app.tasks.list({
        filter: handleOptionalUserFilter(filter),
        limit: ifDef(limit, Number),
      })
      await sendObjects(tasks, req, res)
    })
  )
  // clears all task logs
  .delete(
    '/tasks',
    wrap(async (req, res) => {
      await app.tasks.clearLogs()
      res.sendStatus(200)
    })
  )
  .get(
    '/tasks/:id',
    wrap(async (req, res) => {
      const {
        params: { id },
        query: { wait },
      } = req
      if (wait !== undefined) {
        // `wait` (any value): answer with the task on its next update
        // `wait=result`: answer only once the task has settled
        const stopWatch = await app.tasks.watch(id, task => {
          if (wait !== 'result' || task.status !== 'pending') {
            stopWatch()
            res.json(task)
          }
        })
        // stop watching if the client goes away
        req.on('close', stopWatch)
      } else {
        res.json(await app.tasks.get(id))
      }
    }, true)
  )
  .delete(
    '/tasks/:id',
    wrap(async (req, res) => {
      await app.tasks.deleteLog(req.params.id)
      res.sendStatus(200)
    })
  )
  .get(
    '/tasks/:id/actions',
    wrap(async (req, res) => {
      const task = await app.tasks.get(req.params.id)
      // the abort action is only available while the task is pending
      await sendObjects(task.status === 'pending' ? [{ id: 'abort' }] : [], req, res)
    })
  )
  .post(
    '/tasks/:id/actions/abort',
    wrap(async (req, res) => {
      const { id } = req.params
      await app.tasks.abort(id)

      // 202 Accepted: the abortion is asynchronous, answer with the URL of
      // the task
      //
      // fixed: `res.status` is a method in Express, assigning to it
      // overwrote the method without setting the status code
      res.statusCode = 202
      res.end(req.baseUrl + '/tasks/' + id)
    }, true)
  )
// user routes (users are XO records, not XAPI objects)
api
  .get(
    '/users',
    wrap(async (req, res) => {
      let users = await app.getAllUsers()

      // in-memory filtering/truncating because users are not XO objects
      const { filter, limit } = req.query
      if (filter !== undefined) {
        users = users.filter(CM.parse(filter).createPredicate())
      }
      if (limit < users.length) {
        users.length = limit
      }

      // only expose public properties (e.g. no password hashes)
      //
      // fixed: await the promise so that errors are forwarded to `wrap`'s
      // error handling instead of becoming unhandled rejections
      await sendObjects(users.map(getUserPublicProperties), req, res)
    })
  )
  .get(
    '/users/:id',
    wrap(async (req, res) => {
      res.json(getUserPublicProperties(await app.getUser(req.params.id)))
    })
  )
// generic collection listing: GET /<collection>?filter=…&limit=…
api.get(
  '/:collection',
  wrap(async (req, res) => {
    const { query } = req
    await sendObjects(
      await app.getObjects({
        // combine the collection's type predicate with the optional
        // user-provided filter
        filter: every(req.collection.isCorrectType, handleOptionalUserFilter(query.filter)),
        limit: ifDef(query.limit, Number),
      }),
      req,
      res
    )
  })
)
// should go before routes /:collection/:object because they will match but
// will not work due to the extension being included in the object identifier
//
// VDI content export, either as VHD or as raw bytes
api.get(
  '/:collection(vdis|vdi-snapshots)/:object.:format(vhd|raw)',
  wrap(async (req, res) => {
    const stream = await req.xapiObject.$exportContent({ format: req.params.format })

    // stream can be an HTTP response, in this case, extract interesting data
    const { headers = {}, length, statusCode = 200, statusMessage = 'OK' } = stream

    // Set the correct disposition
    headers['content-disposition'] = 'attachment'

    // expose the stream length if known
    if (headers['content-length'] === undefined && length !== undefined) {
      headers['content-length'] = length
    }

    res.writeHead(statusCode, statusMessage, headers)
    await pipeline(stream, res)
  })
)
// full VM export as an XVA archive, optionally compressed (?compress=…)
api.get(
  '/:collection(vms|vm-snapshots|vm-templates)/:object.xva',
  wrap(async (req, res) => {
    const stream = await req.xapiObject.$export({ compress: req.query.compress })

    stream.headers['content-disposition'] = 'attachment'
    res.writeHead(stream.statusCode, stream.statusMessage != null ? stream.statusMessage : '', stream.headers)
    await pipeline(stream, res)
  })
)
// GET /<collection>/<object>: send the XO object itself
api.get('/:collection/:object', (req, res) => {
  let result = req.xoObject

  // add locations of sub-routes for discoverability
  const { routes } = req.collection
  if (routes !== undefined) {
    // copy before mutating: XO objects are shared
    result = { ...result }
    for (const route of Object.keys(routes)) {
      // e.g. the `audit.txt` route is exposed as `audit_href`
      result[route.split('.')[0] + '_href'] = join(req.baseUrl, req.path, route)
    }
  }

  res.json(result)
})
// partial object update: only `name_description` and `name_label` are
// supported, other body properties are silently ignored
api.patch(
  '/:collection/:object',
  json(),
  wrap(async (req, res) => {
    const obj = req.xapiObject

    const promises = []
    const { body } = req
    for (const key of ['name_description', 'name_label']) {
      const value = body[key]
      if (value !== undefined) {
        promises.push(obj['set_' + key](value))
      }
    }

    // fixed: `await promises` on a plain array is a no-op, `Promise.all` is
    // required to actually wait for the updates (and to surface their errors)
    await Promise.all(promises)
    res.sendStatus(204)
  })
)
// pending tasks related to this object
api.get(
  '/:collection/:object/tasks',
  wrap(async (req, res) => {
    const { query } = req
    const objectId = req.xoObject.id
    const tasks = app.tasks.list({
      filter: every(_ => _.status === 'pending' && _.objectId === objectId, handleOptionalUserFilter(query.filter)),
      limit: ifDef(query.limit, Number),
    })
    await sendObjects(tasks, req, res, req.baseUrl + '/tasks')
  })
)
// list the actions available on this object's collection
api.get(
  '/:collection/:object/actions',
  wrap((req, res) => {
    const { actions } = req.collection
    return sendObjects(actions === undefined ? [] : Array.from(Object.keys(actions), id => ({ id })), req, res)
  })
)
// run an action: asynchronous by default (202 + task URL), synchronous when
// the `sync` query parameter is present
api.post('/:collection/:object/actions/:action', json(), (req, res, next) => {
  const { action } = req.params
  const fn = req.collection.actions?.[action]
  if (fn === undefined) {
    return next()
  }

  const { xapiObject, xoObject } = req
  // the action execution is tracked as an XO task
  const task = app.tasks.create({ name: `REST: ${action} ${req.collection.type}`, objectId: xoObject.id })
  const pResult = task.run(() => fn({ xapiObject, xoObject }, req.body))
  if (Object.hasOwn(req.query, 'sync')) {
    pResult.then(result => res.json(result), next)
  } else {
    // ignore the result here, it can be fetched later via the task
    pResult.catch(noop)
    res.statusCode = 202
    res.end(req.baseUrl + '/tasks/' + task.id)
  }
})
// collection-specific sub-routes (see `collections.<id>.routes`)
api.get(
  '/:collection/:object/:route',
  wrap((req, res, next) => {
    const handler = req.collection.routes?.[req.params.route]
    if (handler !== undefined) {
      return handler(req, res, next)
    }
    return next()
  })
)
// VDI creation on an SR by uploading its content (VHD by default, raw bytes
// when the `raw` query parameter is present)
api.post(
  '/:collection(srs)/:object/vdis',
  wrap(async (req, res) => {
    const sr = req.xapiObject
    // expose the upload size to the XAPI import
    req.length = +req.headers['content-length']

    const { name_label, name_description, raw } = req.query
    const vdiRef = await sr.$importVdi(req, {
      format: raw !== undefined ? VDI_FORMAT_RAW : VDI_FORMAT_VHD,
      name_label,
      name_description,
    })

    // answer with the UUID of the new VDI
    res.end(await sr.$xapi.getField('VDI', vdiRef, 'uuid'))
  })
)
// deletion of disks and VMs
api.delete(
  '/:collection(vdis|vdi-snapshots|vms|vm-snapshots|vm-templates)/:object',
  wrap(async (req, res) => {
    await req.xapiObject.$destroy()
    res.sendStatus(200)
  })
)
}
registerRestApi(spec, base = '/') {
for (const path of Object.keys(spec)) {
if (path[0] === '_') {
const handler = spec[path]
this.#api[path.slice(1)](base, json(), async (req, res, next) => {
try {
const result = await handler(req, res, next)
if (result !== undefined) {
const isIterable =
result !== null && typeof (result[Symbol.iterator] ?? result[Symbol.asyncIterator]) === 'function'
if (isIterable) {
await sendObjects(result, req, res)
} else {
res.json(result)
}
}
} catch (error) {
next(error)
addToRestApi(spec) {
const add = (node, spec) => {
for (const key of Object.keys(spec)) {
if (key.length !== 1 && key[0] === '_') {
if (node.has(key)) {
throw new Error('duplicate entry')
}
})
} else {
this.registerRestApi(spec[path], join(base, path))
node.set(key, spec[key])
} else {
let current = node.get(key)
if (current === undefined) {
current = new Map()
node.set(key, current)
}
add(current, spec[key])
}
}
}
return () => {
throw new Error('not implemented')
}
return add(this.#root, spec)
}
}

View File

@@ -546,8 +546,8 @@ export default class XenServers {
return xapi === undefined
? 'disconnected'
: this._serverIdsByPool[xapi.pool?.$id] === id
? 'connected'
: 'connecting'
? 'connected'
: 'connecting'
}
async getAllXenServers() {

View File

@@ -18,14 +18,14 @@
"preferGlobal": false,
"main": "dist/",
"engines": {
"node": ">=12.3"
"node": ">=12"
},
"dependencies": {
"child-process-promise": "^2.0.3",
"lodash": "^4.17.15",
"pako": "^2.0.4",
"promise-toolbox": "^0.21.0",
"tar-stream": "^3.1.6",
"tar-stream": "^2.2.0",
"vhd-lib": "^4.6.1",
"xml2js": "^0.4.23"
},

View File

@@ -1,13 +1,12 @@
import tar from 'tar-stream'
import { computeVmdkLength, vhdToVMDKIterator } from '.'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'stream'
// WE MIGHT WANT TO HAVE A LOOK HERE: https://opennodecloud.com/howto/2013/12/25/howto-ON-ovf-reference.html
/**
*
* @param outStream
* @param writeStream
* @param vmName
* @param vmDescription
* @param disks [{name, fileName, capacityMB, getStream}]
@@ -17,13 +16,19 @@ import { pipeline } from 'stream'
* @returns readStream
*/
export async function writeOvaOn(
outStream,
writeStream,
{ vmName, vmDescription = '', disks = [], firmware = 'bios', nics = [], vmMemoryMB = 64, cpuCount = 1 }
) {
const ovf = createOvf(vmName, vmDescription, disks, nics, vmMemoryMB, cpuCount, firmware)
const tarStream = tar.pack()
pipeline(tarStream, outStream, () => {})
await fromCallback.call(tarStream, tarStream.entry, { name: `metadata.ovf` }, Buffer.from(ovf, 'utf8'))
const pack = tar.pack()
const pipe = pack.pipe(writeStream)
await fromCallback.call(pack, pack.entry, { name: `metadata.ovf` }, Buffer.from(ovf, 'utf8'))
async function writeDisk(entry, blockIterator) {
for await (const block of blockIterator) {
await fromCallback.call(entry, entry.write, block)
}
}
// https://github.com/mafintosh/tar-stream/issues/24#issuecomment-558358268
async function pushDisk(disk) {
@@ -33,20 +38,23 @@ export async function writeOvaOn(
}
disk.fileSize = size
return new Promise((resolve, reject) => {
const entryWriteStream = tarStream.entry({ name: `${disk.name}.vmdk`, size, type: 'file' }, err => {
const entry = pack.entry({ name: `${disk.name}.vmdk`, size }, err => {
if (err == null) {
return resolve()
} else return reject(err)
})
pipeline(iterator, entryWriteStream, () => {})
return writeDisk(entry, iterator).then(
() => entry.end(),
e => reject(e)
)
})
}
for (const disk of disks) {
await pushDisk(disk)
}
tarStream.finalize()
return outStream
pack.finalize()
return pipe
}
function createDiskSections(disks) {

View File

@@ -1200,7 +1200,6 @@ const messages = {
copyToClipboardLabel: 'Copy',
ctrlAltDelButtonLabel: 'Ctrl+Alt+Del',
ctrlAltDelConfirmation: 'Send Ctrl+Alt+Del to VM?',
disabledConsole: 'Console is disabled for this VM',
multilineCopyToClipboard: 'Multiline copy',
tipLabel: 'Tip:',
hideHeaderTooltip: 'Hide info',

View File

@@ -285,8 +285,8 @@ const TimePicker = decorate([
step === 1
? optionsValues
: step !== undefined
? optionsValues.filter((_, i) => i % step === 0)
: value.split(',').map(Number),
? optionsValues.filter((_, i) => i % step === 0)
: value.split(',').map(Number),
// '*' => 1
// '*/2' => 2

View File

@@ -93,10 +93,10 @@ const SelectCoresPerSocket = decorate([
state.valueExceedsCoresLimit
? _('vmCoresPerSocketExceedsCoresLimit', { maxCores })
: state.valueExceedsSocketsLimit
? _('vmCoresPerSocketExceedsSocketsLimit', {
maxSockets: MAX_VM_SOCKETS,
})
: _('vmCoresPerSocketNotDivisor')
? _('vmCoresPerSocketExceedsSocketsLimit', {
maxSockets: MAX_VM_SOCKETS,
})
: _('vmCoresPerSocketNotDivisor')
}
>
<Icon icon='error' size='lg' />

View File

@@ -67,8 +67,8 @@ const getIds = value =>
value == null || typeof value === 'string' || isInteger(value)
? value
: Array.isArray(value)
? map(value, getIds)
: value.id
? map(value, getIds)
: value.id
const getOption = (object, container) => ({
label: container ? `${getLabel(object)} ${getLabel(container)}` : getLabel(object),
@@ -159,11 +159,11 @@ class GenericSelect extends React.Component {
return isEmpty(missingObjects)
? objects
: withContainers
? {
...objects,
missingObjects,
}
: [...objects, ...missingObjects]
? {
...objects,
missingObjects,
}
: [...objects, ...missingObjects]
}
)

View File

@@ -156,8 +156,8 @@ export const createFilter = (collection, predicate) =>
? EMPTY_ARRAY
: EMPTY_OBJECT
: predicate
? (isArrayLike(collection) ? filter : pickBy)(collection, predicate)
: collection
? (isArrayLike(collection) ? filter : pickBy)(collection, predicate)
: collection
)
)

View File

@@ -673,12 +673,12 @@ export const getDetachedBackupsOrSnapshots = (backupsOrSnapshots, { jobs, schedu
vm === undefined
? 'missingVm'
: job === undefined
? 'missingJob'
: schedules[scheduleId] === undefined
? 'missingSchedule'
: !createPredicate(omit(job.vms, 'power_state'))(vm)
? 'missingVmInJob'
: undefined
? 'missingJob'
: schedules[scheduleId] === undefined
? 'missingSchedule'
: !createPredicate(omit(job.vms, 'power_state'))(vm)
? 'missingVmInJob'
: undefined
if (reason !== undefined) {
detachedBackupsOrSnapshots.push({

Some files were not shown because too many files have changed in this diff Show More