Compare commits

...

63 Commits

Author SHA1 Message Date
badrAZ
888ae03d2d update implementation 2021-04-16 15:56:12 +02:00
badrAZ
125b74d873 Merge branch 'master' into compactReport 2021-04-16 15:36:49 +02:00
badrAZ
c38b957d7c fix(xo-{server,proxy}/config): add copyRetention default value (#5737)
Introduced in xo-server by 0811da901
2021-04-16 14:56:43 +02:00
Julien Fontanet
282bb26da9 chore(xapi/VM_{destroy,snapshot}): delete → destroy
Introduced by 6b1c30157
2021-04-16 10:35:48 +02:00
Julien Fontanet
6b1c30157f feat(xapi/VM_{destroy,snapshot}): warn instead of ignoring errors 2021-04-16 10:32:04 +02:00
Julien Fontanet
e433251420 fix(xo-server/recover-account): pass config as named param
Introduced by 7024c7d59
2021-04-15 15:52:56 +02:00
Julien Fontanet
49ed9c7f7f fix(xo-server/api): fix config name entry verboseApiLogsOnErrors 2021-04-15 13:34:05 +02:00
Julien Fontanet
5a5c0326b7 fix(xapi/VM_destroy): correctly check *other* VM is not control domain 2021-04-15 11:52:56 +02:00
Julien Fontanet
a25708be2b fix(xapi/VM_create): default actions_after_{crash,reboot} is restart
See https://xapi-project.github.io/xen-api/classes/vm.html

`reboot` is not valid.
2021-04-15 11:48:40 +02:00
Julien Fontanet
e8f2934534 feat(xo-server/getBackupNgLogs): expose proxyId
Follow up on b454b4dff
2021-04-15 11:43:34 +02:00
badrAZ
37f8ac9da9 fix(fs/LocalHandler#_lock): correctly resolve path (#5726) 2021-04-14 15:59:11 +02:00
badrAZ
0ded95ce48 fix(xo-server/backup-ng): add slash between backup and remote ids (#5723)
This is symmetric to the parsing: 052aafd7cb/packages/xo-server/src/xo-mixins/backups-ng/index.js (L88-L94)
2021-04-14 14:43:28 +02:00
Julien Fontanet
108e769833 fix(CHANGELOG.unreleased): @xen-orchestra/xapi
Introduced by 864946477
2021-04-14 11:46:12 +02:00
Julien Fontanet
5b2313ee56 feat(xapi): warn on retry 2021-04-14 11:10:20 +02:00
Julien Fontanet
368b84b7ff chore(xapi/VDI_destroy): move retry condition in constructor 2021-04-14 10:30:11 +02:00
Julien Fontanet
864946477b fix(xapi/VDI_destroy): respect vdiDestroyRetryWhenInUse option 2021-04-14 10:23:53 +02:00
Julien Fontanet
da67298b43 chore: update promise-toolbox to 0.19.0 2021-04-14 00:12:34 +02:00
Julien Fontanet
db5cb8b3a9 chore(disposables): using → Disposable.use 2021-04-13 23:35:10 +02:00
Julien Fontanet
9643292be6 fix(babel): dont ignore test files when linting 2021-04-13 18:09:40 +02:00
Julien Fontanet
a651e34206 fix(xo-server/math): fix ESLint directive 2021-04-13 18:09:40 +02:00
Julien Fontanet
a4e7fd3209 feat(xo-server): use @xen-orchestra/mixins/Config 2021-04-13 18:09:40 +02:00
Julien Fontanet
d1113d40aa chore(mixins): use PascalCase as they are classes 2021-04-13 18:09:40 +02:00
Julien Fontanet
dcd834d3e4 chore(xo-server/xo-mixins): xo → app
- already used in some mixins
- used in xo-proxy
2021-04-13 18:09:40 +02:00
badrAZ
c0be8a2c04 fix(@xen-orchestra/backups/_cleanVm): VHDs not correctly listed (#5720)
Introduced by 20f4c95
2021-04-13 16:09:42 +02:00
Julien Fontanet
09182172cf chore(xo-server): use @xen-orchestra/mixins/hooks 2021-04-13 13:41:22 +02:00
Julien Fontanet
56e903e359 feat(mixins): mixins shared between xo-proxy and xo-server 2021-04-13 13:17:50 +02:00
Julien Fontanet
9922d60e5b feat(@xen-orchestra/mixin): 0.1.0 2021-04-13 13:01:24 +02:00
Julien Fontanet
09ea42439e chore(mixin): remove build step 2021-04-13 12:31:11 +02:00
Julien Fontanet
ce1acf1adc feat(@xen-orchestra/proxy): 0.12.1 2021-04-13 10:46:44 +02:00
Julien Fontanet
fe00badb0f feat: release 5.57.1 2021-04-13 10:27:38 +02:00
Julien Fontanet
2146d67dc2 fix(CHANGELOG{,.unreleased}): move backup dev notes
Introduced by e7b846155
2021-04-13 10:26:26 +02:00
Julien Fontanet
6728768b3e feat(xo-server): 5.78.4 2021-04-12 23:43:33 +02:00
Julien Fontanet
48db3de08c feat(@xen-orchestra/backups): 0.9.3 2021-04-12 23:43:16 +02:00
Julien Fontanet
b944364d1e fix(backups/_copyDelta): dont pass extra params to watchStreamSize
Introduced by 9b1fbf0fb
2021-04-12 23:42:29 +02:00
Julien Fontanet
39c2fbe8c3 feat(xo-web): 5.80.1 2021-04-12 22:56:48 +02:00
Julien Fontanet
c7ba640ecb feat(xo-server): 5.78.3 2021-04-12 22:56:29 +02:00
Julien Fontanet
f749f6be72 feat(xo-server-load-balancer): 0.5.0 2021-04-12 22:56:09 +02:00
Julien Fontanet
ccdd384c6e feat(@xen-orchestra/backups): 0.9.2 2021-04-12 22:55:36 +02:00
Julien Fontanet
4061e2c149 feat(@xen-orchestra/xapi): 0.6.1 2021-04-12 22:55:19 +02:00
Julien Fontanet
e7b8461555 chore(CHANGELOG): update next 2021-04-12 22:54:51 +02:00
Julien Fontanet
70d1537ecc feat(xo-server/vm.set): dont switch to DMC when changing memory
Fixes #4983
2021-04-12 21:15:55 +02:00
Julien Fontanet
cb37f85d8e fix(xo-web/proxies): fix force ugprade
Introduced by a4d90e8aff

See xoa-support#3613

Forward options in `upgradeAppliance` effect.
2021-04-12 12:16:18 +02:00
Julien Fontanet
9becf565a4 fix(CHANGELOG.unreleased): add missing entriy
Introduced by 4bbe8488f
2021-04-12 11:12:54 +02:00
Julien Fontanet
b1a4e5467d feat(xo-server/xapi/startVm): move hostId into options 2021-04-12 11:01:42 +02:00
Julien Fontanet
4bbe8488fc fix(xo-server/xapi/startVm): dont destructure options without default value
See xoa-support#3613
2021-04-12 10:52:41 +02:00
Jon Sands
54a0d126b5 fix(xo-web/en): more grammar fixes (#5714) 2021-04-10 10:30:39 +02:00
Julien Fontanet
9b1fbf0fbf fix(backups/ImportVmBackup): use transfered size instead of backup size
Backup size is smaller in case of delta VHDs.
2021-04-09 15:33:50 +02:00
Julien Fontanet
6f626974ac chore(backups/readDeltaVmBackup): remove unused value 2021-04-09 15:02:26 +02:00
Julien Fontanet
5c47beb1c4 fix(CHANGELOG.unreleased): add missing entry
Related to 3cc9fd278
2021-04-09 11:35:49 +02:00
Julien Fontanet
b4fbe8df07 feat(xo-server/api): explicitely allow $type and enumNames in schemas 2021-04-09 11:16:17 +02:00
Julien Fontanet
3cc9fd2782 fix(xo-server/api): log instead of rejecting non-strict schemas
Fixes https://xcp-ng.org/forum/topic/4439/plugin-transport-email-v0-6-0-broken
2021-04-09 11:03:13 +02:00
Julien Fontanet
eaecba7ec8 fix(xo-server/api): dont log pool.listMissingPatches & host.stats errors
Introduced by 9226c6cac
2021-04-09 10:47:01 +02:00
Julien Fontanet
42a43be092 feat(backups/Task.wrapFn): opts can be a function 2021-04-09 01:27:54 +02:00
Julien Fontanet
052aafd7cb fix(backups/DeltaBackupWriter): merge should be subtask of export
Introduced by f5024f0e7
2021-04-09 01:25:01 +02:00
Julien Fontanet
4abae578f4 feat(backups/Task): new implementation
- no longer requires logging
- supports cancelation (`Task.cancelToken` and `Task#cancel()`)
- supports running multiple functions in the same task
2021-04-09 01:19:09 +02:00
Julien Fontanet
4132d96591 chore(backups): remove unused deps 2021-04-09 01:13:22 +02:00
Julien Fontanet
8e4c90129e fix(backups/DeltaBackupWriter): dont overwrite prepare/cleanup in constructor
Introduced in e69b6c4dc
2021-04-08 23:52:21 +02:00
Julien Fontanet
31406927e6 chore: disable unused Jest coverage 2021-04-08 22:25:10 +02:00
Julien Fontanet
303646efd3 chore: remove unnecessary Jest transform setting 2021-04-08 22:25:10 +02:00
Julien Fontanet
9efc4f9113 chore: remove unnecessary babel-core 2021-04-08 22:25:10 +02:00
Julien Fontanet
31a5a42ec7 chore: use @babel/eslint-parser instead of babel-eslint
babel-eslint is no longer maintained and has issues with some recent syntaxes like private methods.
2021-04-08 22:25:10 +02:00
Yannick Achy
2d0ed3ec8a feat(doc): Host update revision (#5716)
* Host update revision

Co-authored-by: yannick Achy <yannick.achy@vates.fr>
2021-04-08 16:54:06 +02:00
badrAZ
5258dad51a feat(xo-server-backup-reports): send compact report to slack/mattermost
Fixes #3195

This PR removes subtasks from markdown
2021-01-29 12:05:56 +01:00
97 changed files with 894 additions and 793 deletions

View File

@@ -20,7 +20,7 @@ module.exports = {
},
],
parser: 'babel-eslint',
parser: '@babel/eslint-parser',
parserOptions: {
ecmaFeatures: {
legacyDecorators: true,

View File

@@ -48,7 +48,7 @@ import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```
### `debounceResource.flushAll()`

View File

@@ -30,7 +30,7 @@ import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```
### `debounceResource.flushAll()`

View File

@@ -58,7 +58,7 @@ module.exports = function (pkg, plugins, presets) {
return {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {

View File

@@ -7,12 +7,12 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.9.1",
"@xen-orchestra/backups": "^0.9.3",
"@xen-orchestra/fs": "^0.14.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"vhd-lib": "^1.0.0"
},
"engines": {

View File

@@ -3,6 +3,7 @@ const assert = require('assert')
const { formatFilenameDate } = require('./_filenameDate')
const { importDeltaVm } = require('./_deltaVm')
const { Task } = require('./Task')
const { watchStreamSize } = require('./_watchStreamSize')
exports.ImportVmBackup = class ImportVmBackup {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
@@ -18,13 +19,17 @@ exports.ImportVmBackup = class ImportVmBackup {
const metadata = this._metadata
const isFull = metadata.mode === 'full'
const sizeContainer = { size: 0 }
let backup
if (isFull) {
backup = await adapter.readFullVmBackup(metadata)
watchStreamSize(backup, sizeContainer)
} else {
assert.strictEqual(metadata.mode, 'delta')
backup = await adapter.readDeltaVmBackup(metadata)
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
}
return Task.run(
@@ -52,7 +57,7 @@ exports.ImportVmBackup = class ImportVmBackup {
])
return {
size: metadata.size,
size: sizeContainer.size,
id: await xapi.getField('VM', vmRef, 'uuid'),
}
}

View File

@@ -528,7 +528,7 @@ class RemoteAdapter {
const dir = dirname(metadata._filename)
const streams = {}
await asyncMapSettled(Object.entries(vdis), async ([id, vdi]) => {
await asyncMapSettled(Object.keys(vdis), async id => {
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
})

View File

@@ -1,11 +1,12 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const { SyncThenable } = require('./_syncThenable')
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
@@ -19,163 +20,132 @@ const serializeError = error =>
stack: error.stack,
}
: error
exports.serializeError = serializeError
class TaskLogger {
constructor(logFn, parentId) {
this._log = logFn
this._parentId = parentId
this._taskId = undefined
const $$task = Symbol('@xen-orchestra/backups/Task')
class Task {
static get cancelToken() {
const task = Zone.current.data[$$task]
return task !== undefined ? task.#cancelToken : CancelToken.none
}
get taskId() {
const taskId = this._taskId
if (taskId === undefined) {
throw new Error('start the task first')
}
return taskId
static run(opts, fn) {
return new this(opts).run(fn, true)
}
// create a subtask
fork() {
return new TaskLogger(this._log, this.taskId)
}
info(message, data) {
return this._log({
data,
event: 'info',
message,
taskId: this.taskId,
timestamp: Date.now(),
})
}
run(message, data, fn) {
if (arguments.length === 2) {
fn = data
data = undefined
}
return SyncThenable.tryUnwrap(
SyncThenable.fromFunction(() => {
if (this._taskId !== undefined) {
throw new Error('task has already started')
}
this._taskId = Math.random().toString(36).slice(2)
return this._log({
data,
event: 'start',
message,
parentId: this._parentId,
taskId: this.taskId,
timestamp: Date.now(),
})
})
.then(fn)
.then(
result => {
const log = this._log
this._log = logAfterEnd
return SyncThenable.resolve(
log({
event: 'end',
result,
status: 'success',
taskId: this.taskId,
timestamp: Date.now(),
})
).then(() => result)
},
error => {
const log = this._log
this._log = logAfterEnd
return SyncThenable.resolve(
log({
event: 'end',
result: serializeError(error),
status: 'failure',
taskId: this.taskId,
timestamp: Date.now(),
})
).then(() => {
throw error
})
}
)
)
}
warning(message, data) {
return this._log({
data,
event: 'warning',
message,
taskId: this.taskId,
timestamp: Date.now(),
})
}
wrapFn(fn, message, data) {
const logger = this
return function () {
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
return logger.run(evaluate(message), evaluate(data), () => fn.apply(this, arguments))
}
}
}
const $$task = Symbol('current task logger')
const getCurrent = () => Zone.current.data[$$task]
const Task = {
info(message, data) {
const task = getCurrent()
if (task !== undefined) {
return task.info(message, data)
}
},
run({ name, data, onLog }, fn) {
let parentId
if (onLog === undefined) {
const parent = getCurrent()
if (parent === undefined) {
return fn()
}
onLog = parent._log
parentId = parent.taskId
}
const task = new TaskLogger(onLog, parentId)
const zone = Zone.current.fork('task')
zone.data[$$task] = task
return task.run(name, data, zone.wrap(fn))
},
warning(message, data) {
const task = getCurrent()
if (task !== undefined) {
return task.warning(message, data)
}
},
wrapFn(opts, fn) {
static wrapFn(opts, fn) {
// compatibility with @decorateWith
if (typeof fn !== 'function') {
;[fn, opts] = [opts, fn]
}
const { name, data, onLog } = opts
return function () {
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
return Task.run({ name: evaluate(name), data: evaluate(data), onLog }, () => fn.apply(this, arguments))
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
}
},
}
#cancelToken
#id = Math.random().toString(36).slice(2)
#onLog
#zone
constructor({ name, data, onLog }) {
let parentCancelToken, parentId
if (onLog === undefined) {
const parent = Zone.current.data[$$task]
if (parent === undefined) {
onLog = noop
} else {
onLog = log => parent.#onLog(log)
parentCancelToken = parent.#cancelToken
parentId = parent.#id
}
}
const zone = Zone.current.fork('@xen-orchestra/backups/Task')
zone.data[$$task] = this
this.#zone = zone
const { cancel, token } = CancelToken.source(parentCancelToken && [parentCancelToken])
this.#cancelToken = token
this.cancel = cancel
this.#onLog = onLog
this.#log('start', {
data,
message: name,
parentId,
})
}
failure(error) {
this.#end('failure', serializeError(error))
}
info(message, data) {
this.#log('info', { data, message })
}
/**
* Run a function in the context of this task
*
* In case of error, the task will be failed.
*
* @typedef Result
* @param {() => Result)} fn
* @param {boolean} last - Whether the task should succeed if there is no error
* @returns Result
*/
run(fn, last = false) {
return this.#zone.run(() => {
try {
const result = fn()
let then
if (result != null && typeof (then = result.then) === 'function') {
then.call(result, last && (value => this.success(value)), error => this.failure(error))
} else if (last) {
this.success(result)
}
return result
} catch (error) {
this.failure(error)
throw error
}
})
}
success(value) {
this.#end('success', value)
}
warning(message, data) {
this.#log('warning', { data, message })
}
wrapFn(fn, last) {
const task = this
return function () {
return task.run(() => fn.apply(this, arguments), last)
}
}
#end(status, result) {
this.#log('end', { result, status })
this.#onLog = logAfterEnd
}
#log(event, props) {
this.#onLog({
...props,
event,
taskId: this.#id,
timestamp: Date.now(),
})
}
}
exports.Task = Task
for (const method of ['info', 'warning']) {
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
}

View File

@@ -13,18 +13,6 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
this._backup = backup
this._settings = settings
this._sr = sr
this.transfer = Task.wrapFn(
{
name: 'export',
data: ({ deltaExport }) => ({
id: sr.uuid,
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
type: 'SR',
}),
},
this.transfer
)
}
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
@@ -51,7 +39,23 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
}
}
async prepare() {
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
name: 'export',
data: {
id: this._sr.uuid,
isFull,
type: 'SR',
},
})
this.transfer = task.wrapFn(this.transfer)
this.cleanup = task.wrapFn(this.cleanup, true)
return task.run(() => this._prepare())
}
async _prepare() {
const settings = this._settings
const { uuid: srUuid, $xapi: xapi } = this._sr
const { scheduleId, vm } = this._backup
@@ -63,8 +67,12 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
if (settings.deleteFirst) {
await this._deleteOldEntries()
} else {
this.cleanup = this._deleteOldEntries
}
}
async cleanup() {
if (!this._settings.deleteFirst) {
await this._deleteOldEntries()
}
}

View File

@@ -20,21 +20,8 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
constructor(backup, remoteId, settings) {
this._adapter = backup.remoteAdapters[remoteId]
this._backup = backup
this._remoteId = remoteId
this._settings = settings
this.transfer = Task.wrapFn(
{
name: 'export',
data: ({ deltaExport }) => ({
id: remoteId,
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
type: 'remote',
}),
},
this.transfer
)
this[settings.deleteFirst ? 'prepare' : 'cleanup'] = this._deleteOldEntries
}
async checkBaseVdis(baseUuidToSrcVdi) {
@@ -72,7 +59,23 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
})
}
async prepare() {
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
name: 'export',
data: {
id: this._remoteId,
isFull,
type: 'remote',
},
})
this.transfer = task.wrapFn(this.transfer)
this.cleanup = task.wrapFn(this.cleanup, true)
return task.run(() => this._prepare())
}
async _prepare() {
const adapter = this._adapter
const settings = this._settings
const { scheduleId, vm } = this._backup
@@ -99,8 +102,12 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
if (settings.deleteFirst) {
await this._deleteOldEntries()
} else {
this.cleanup = this._deleteOldEntries
}
}
async cleanup() {
if (!this._settings.deleteFirst) {
await this._deleteOldEntries()
}
}

View File

@@ -146,13 +146,16 @@ exports.VmBackup = class VmBackup {
async _copyDelta() {
const { exportedVm } = this
const baseVm = this._baseVm
const fullVdisRequired = this._fullVdisRequired
await asyncMap(this._writers, writer => writer.prepare && writer.prepare())
const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0
await asyncMap(this._writers, writer => writer.prepare && writer.prepare({ isFull }))
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
fullVdisRequired: this._fullVdisRequired,
fullVdisRequired,
})
const sizeContainers = mapValues(deltaExport.streams, watchStreamSize)
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
const timestamp = Date.now()

View File

@@ -72,6 +72,29 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, {
const noop = Function.prototype
const listVhds = async (handler, vmDir) => {
const vhds = []
await asyncMap(
await handler.list(`${vmDir}/vdis`, {
prependDir: true,
}),
async jobDir =>
asyncMap(
await handler.list(jobDir, {
prependDir: true,
}),
async vdiDir =>
vhds.push(
...(await handler.list(vdiDir, {
filter: isVhdFile,
prependDir: true,
}))
)
)
)
return vhds
}
exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop }) {
const handler = this._handler
@@ -80,36 +103,30 @@ exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop })
const vhdChildren = { __proto__: null }
// remove broken VHDs
await asyncMap(
await handler.list(`${vmDir}/vdis`, {
filter: isVhdFile,
prependDir: true,
}),
async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
onLog(`error while checking the VHD with path ${path}`)
if (error?.code === 'ERR_ASSERTION' && remove) {
await handler.unlink(path)
await asyncMap(await listVhds(handler, vmDir), async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
onLog(`error while checking the VHD with path ${path}`)
if (error?.code === 'ERR_ASSERTION' && remove) {
await handler.unlink(path)
}
}
)
})
// remove VHDs with missing ancestors
{

View File

@@ -10,7 +10,7 @@ exports.getTmpDir = async function getTmpDir() {
const path = join(tmpdir(), Math.random().toString(36).slice(2))
try {
await mkdir(path)
return new Disposable(path, () => rmdir(path))
return new Disposable(() => rmdir(path), path)
} catch (error) {
if (i === MAX_ATTEMPTS) {
throw error

View File

@@ -1,46 +0,0 @@
function fulfilledThen(cb) {
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
}
function rejectedThen(_, cb) {
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
}
class SyncThenable {
static resolve(value) {
if (value != null && typeof value.then === 'function') {
return value
}
return new this(false, value)
}
static fromFunction(fn, ...arg) {
try {
return this.resolve(fn(...arg))
} catch (error) {
return this.reject(error)
}
}
static reject(reason) {
return new this(true, reason)
}
// unwrap if it's a SyncThenable
static tryUnwrap(value) {
if (value instanceof this) {
if (value.then === rejectedThen) {
throw value.value
}
return value.value
}
return value
}
constructor(rejected, value) {
this.then = rejected ? rejectedThen : fulfilledThen
this.value = value
}
}
exports.SyncThenable = SyncThenable

View File

@@ -1,5 +1,4 @@
exports.watchStreamSize = function watchStreamSize(stream) {
const container = { size: 0 }
exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
stream.on('data', data => {
container.size += data.length
})

View File

@@ -8,9 +8,9 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.9.1",
"version": "0.9.3",
"engines": {
"node": ">=14.5"
"node": ">=14.6"
},
"scripts": {
"postversion": "npm publish --access public"
@@ -18,7 +18,6 @@
"dependencies": {
"@vates/compose": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/multi-key-map": "^0.1.0",
"@vates/parse-duration": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.14.0",
@@ -27,19 +26,18 @@
"compare-versions": "^3.6.0",
"d3-time-format": "^3.0.0",
"end-of-stream": "^1.4.4",
"ensure-array": "^1.0.0",
"fs-extra": "^9.0.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"vhd-lib": "^1.0.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.6.0"
"@xen-orchestra/xapi": "^0.6.1"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -31,7 +31,7 @@
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",

View File

@@ -84,7 +84,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
_lock(path) {
return lockfile.lock(path)
return lockfile.lock(this._getFilePath(path))
}
_mkdir(dir, { mode }) {

View File

@@ -30,7 +30,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.18.0"
"promise-toolbox": "^0.19.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1,39 @@
const camelCase = require('lodash/camelCase')
const { defineProperties, defineProperty, keys } = Object
const noop = Function.prototype
const MIXIN_CYCLIC_DESCRIPTOR = {
configurable: true,
get() {
throw new Error('cyclic dependency')
},
}
module.exports = function mixin(object, mixins, args) {
// add lazy property for each of the mixin, this allows mixins to depend on
// one another without any special ordering
const descriptors = {}
keys(mixins).forEach(name => {
const Mixin = mixins[name]
name = camelCase(name)
descriptors[name] = {
configurable: true,
get: () => {
defineProperty(object, name, MIXIN_CYCLIC_DESCRIPTOR)
const instance = new Mixin(object, ...args)
defineProperty(object, name, {
value: instance,
})
return instance
},
}
})
defineProperties(object, descriptors)
// access all mixin properties to trigger their creation
keys(descriptors).forEach(name => {
noop(object[name])
})
}

View File

@@ -1,4 +1,4 @@
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
const { getBoundPropertyDescriptor } = require('bind-property-descriptor')
// ===================================================================
@@ -25,7 +25,7 @@ const ownKeys =
// -------------------------------------------------------------------
const mixin = Mixins => Class => {
if (__DEV__ && !Array.isArray(Mixins)) {
if (!Array.isArray(Mixins)) {
throw new TypeError('Mixins should be an array')
}
@@ -44,7 +44,7 @@ const mixin = Mixins => Class => {
}
for (const prop of ownKeys(Mixin)) {
if (__DEV__ && prop in prototype) {
if (prop in prototype) {
throw new Error(`${name}#${prop} is already defined`)
}
@@ -106,7 +106,7 @@ const mixin = Mixins => Class => {
return
}
if (__DEV__ && prop in descriptors) {
if (prop in descriptors) {
throw new Error(`${name}.${prop} is already defined`)
}
@@ -117,4 +117,4 @@ const mixin = Mixins => Class => {
return DecoratedClass
}
export { mixin as default }
module.exports = mixin

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/mixin",
"version": "0.0.0",
"version": "0.1.0",
"license": "ISC",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/mixin",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
@@ -15,34 +15,14 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-dev": "^1.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
"bind-property-descriptor": "^1.0.0",
"lodash": "^4.17.21"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -1,12 +1,12 @@
import get from 'lodash/get'
import identity from 'lodash/identity'
import { createLogger } from '@xen-orchestra/log'
import { parseDuration } from '@vates/parse-duration'
import { watch } from 'app-conf'
const get = require('lodash/get')
const identity = require('lodash/identity')
const { createLogger } = require('@xen-orchestra/log')
const { parseDuration } = require('@vates/parse-duration')
const { watch } = require('app-conf')
const { warn } = createLogger('xo:proxy:config')
const { warn } = createLogger('xo:mixins:config')
export default class Config {
module.exports = class Config {
constructor(app, { appDir, appName, config }) {
this._config = config
const watchers = (this._watchers = new Set())

View File

@@ -1,9 +1,9 @@
import assert from 'assert'
import emitAsync from '@xen-orchestra/emit-async'
import EventEmitter from 'events'
import { createLogger } from '@xen-orchestra/log'
const assert = require('assert')
const emitAsync = require('@xen-orchestra/emit-async').default
const EventEmitter = require('events')
const { createLogger } = require('@xen-orchestra/log')
const { debug, warn } = createLogger('xo:proxy:hooks')
const { debug, warn } = createLogger('xo:mixins:hooks')
const runHook = async (emitter, hook) => {
debug(`${hook} start…`)
@@ -17,7 +17,7 @@ const runHook = async (emitter, hook) => {
debug(`${hook} finished`)
}
export default class Hooks extends EventEmitter {
module.exports = class Hooks extends EventEmitter {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.

View File

@@ -0,0 +1,30 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/mixins
[![Package Version](https://badgen.net/npm/v/@xen-orchestra/mixins)](https://npmjs.org/package/@xen-orchestra/mixins) ![License](https://badgen.net/npm/license/@xen-orchestra/mixins) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@xen-orchestra/mixins)](https://bundlephobia.com/result?p=@xen-orchestra/mixins) [![Node compatibility](https://badgen.net/npm/node/@xen-orchestra/mixins)](https://npmjs.org/package/@xen-orchestra/mixins)
> Mixins shared between xo-proxy and xo-server
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/mixins):
```
> npm install --save @xen-orchestra/mixins
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

View File

View File

@@ -0,0 +1,31 @@
{
"private": false,
"name": "@xen-orchestra/mixins",
"description": "Mixins shared between xo-proxy and xo-server",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/mixins",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/mixins",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.0.0",
"engines": {
"node": ">=12"
},
"dependencies": {
"@vates/parse-duration": "^0.1.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"app-conf": "^0.9.0",
"lodash": "^4.17.21"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -31,6 +31,7 @@ retentionXoMetadata = 0
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
copyRetention = 0
deleteFirst = false
exportRetention = 0
fullInterval = 0

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.12.0",
"version": "0.12.1",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -37,12 +37,14 @@
"@vates/decorate-with": "^0.0.1",
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.0",
"@xen-orchestra/backups": "^0.9.1",
"@xen-orchestra/backups": "^0.9.3",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.14.0",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.0.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.6.0",
"@xen-orchestra/xapi": "^0.6.1",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.1.0",
@@ -60,7 +62,7 @@
"ms": "^2.1.2",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^4.0.0",

View File

@@ -1,45 +1,13 @@
import camelCase from 'lodash/camelCase'
import Config from '@xen-orchestra/mixins/Config'
import Hooks from '@xen-orchestra/mixins/Hooks'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource'
import mixins from './mixins'
const { defineProperties, defineProperty, keys } = Object
const noop = Function.prototype
const MIXIN_CYCLIC_DESCRIPTOR = {
configurable: true,
get() {
throw new Error('cyclic dependency')
},
}
export default class App {
constructor(opts) {
// add lazy property for each of the mixin, this allows mixins to depend on
// one another without any special ordering
const descriptors = {}
keys(mixins).forEach(name => {
const Mixin = mixins[name]
name = camelCase(name)
descriptors[name] = {
configurable: true,
get: () => {
defineProperty(this, name, MIXIN_CYCLIC_DESCRIPTOR)
const instance = new Mixin(this, opts)
defineProperty(this, name, {
value: instance,
})
return instance
},
}
})
defineProperties(this, descriptors)
// access all mixin properties to trigger their creation
keys(descriptors).forEach(name => {
noop(this[name])
})
mixin(this, { Config, Hooks, ...mixins }, [opts])
const debounceResource = createDebounceResource()
this.config.watchDuration('resourceCacheDelay', delay => {

View File

@@ -15,7 +15,7 @@ const { debug, warn } = createLogger('xo:proxy:appliance')
const getUpdater = deduped(async function () {
const updater = new JsonRpcWebsocketClient('ws://localhost:9001')
await updater.open()
return new Disposable(updater, () => updater.close())
return new Disposable(() => updater.close(), updater)
})
const callUpdate = params =>

View File

@@ -49,6 +49,6 @@ export default class Remotes {
}
await handler.sync()
return new Disposable(handler, () => handler.forget())
return new Disposable(() => handler.forget(), handler)
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.6.0",
"version": "0.6.1",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -47,7 +47,7 @@
"golike-defer": "^0.5.1",
"lodash": "^4.17.15",
"make-error": "^1.3.5",
"promise-toolbox": "^0.18.0"
"promise-toolbox": "^0.19.0"
},
"private": false,
"license": "AGPL-3.0-or-later",

View File

@@ -4,6 +4,8 @@ const pRetry = require('promise-toolbox/retry')
const { utcFormat, utcParse } = require('d3-time-format')
const { Xapi: Base } = require('xen-api')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi')
exports.isDefaultTemplate = require('./isDefaultTemplate')
// VDI formats. (Raw is not available for delta vdi.)
@@ -34,6 +36,24 @@ const hasProps = o => {
return false
}
const getPoolInfo = ({ pool } = {}) =>
pool && {
uuid: pool.uuid,
name_label: pool.name_label,
}
function onRetry(error) {
try {
warn('retry', {
attemptNumber: this.attemptNumber,
delay: this.delay,
error,
fn: this.fn.name,
arguments: this.arguments,
pool: getPoolInfo(this.this),
})
} catch (error) {}
}
class Xapi extends Base {
constructor({
callRetryWhenTooManyPendingTasks,
@@ -49,6 +69,7 @@ class Xapi extends Base {
delay: 5e3,
tries: 10,
...callRetryWhenTooManyPendingTasks,
onRetry,
when: { code: 'TOO_MANY_PENDING_TASKS' },
}
this._ignoreNobakVdis = ignoreNobakVdis
@@ -57,6 +78,8 @@ class Xapi extends Base {
delay: 5e3,
retries: 10,
...vdiDestroyRetryWhenInUse,
onRetry,
when: { code: 'VDI_IN_USE' },
}
const genericWatchers = (this._genericWatchers = new Set())
@@ -138,16 +161,8 @@ mixin({
})
exports.Xapi = Xapi
// TODO: remove once using next promise-toolbox
function pRetryWrap(fn, options) {
const getOptions = typeof options !== 'function' ? () => options : options
return function () {
return pRetry(() => fn.apply(this, arguments), getOptions.apply(this, arguments))
}
}
function getCallRetryOpts() {
return this._callRetryWhenTooManyPendingTasks
}
Xapi.prototype.call = pRetryWrap(Xapi.prototype.call, getCallRetryOpts)
Xapi.prototype.callAsync = pRetryWrap(Xapi.prototype.callAsync, getCallRetryOpts)
Xapi.prototype.call = pRetry.wrap(Xapi.prototype.call, getCallRetryOpts)
Xapi.prototype.callAsync = pRetry.wrap(Xapi.prototype.callAsync, getCallRetryOpts)

View File

@@ -14,10 +14,7 @@ module.exports = class Vdi {
async destroy(vdiRef) {
await pCatch.call(
// work around a race condition in XCP-ng/XenServer where the disk is not fully unmounted yet
pRetry(() => this.callAsync('VDI.destroy', vdiRef), {
...this._vdiDestroyRetry,
when: { code: 'VDI_IN_USE' },
}),
pRetry(() => this.callAsync('VDI.destroy', vdiRef), this._vdiDestroyRetryWhenInUse),
// if this VDI is not found, consider it destroyed
{ code: 'HANDLE_INVALID' },
noop

View File

@@ -143,8 +143,8 @@ module.exports = class Vm {
async create(
$defer,
{
actions_after_crash = 'reboot',
actions_after_reboot = 'reboot',
actions_after_crash = 'restart',
actions_after_reboot = 'restart',
actions_after_shutdown = 'destroy',
affinity = Ref.EMPTY,
appliance,
@@ -323,21 +323,33 @@ module.exports = class Vm {
await this.call('VM.destroy', vmRef)
return Promise.all([
ignoreErrors.call(asyncMap(vm.snapshots, _ => this.VM_destroy(_))),
deleteDisks &&
ignoreErrors.call(
asyncMap(disks, async vdiRef => {
// Dont destroy if attached to other (non control domain) VMs
for (const vbdRef of await this.getField('VDI', vdiRef, 'VBDs')) {
const vmRef2 = await this.getField('VBD', vbdRef, 'VM')
if (vmRef2 !== vmRef && !(await this.getField('VM', vmRef, 'is_control_domain'))) {
return
}
}
await this.VDI_destroy(vdiRef)
asyncMap(vm.snapshots, snapshotRef =>
this.VM_destroy(snapshotRef).catch(error => {
warn('VM_destroy: failed to destroy snapshot', {
error,
snapshotRef,
vmRef,
})
),
})
),
deleteDisks &&
asyncMap(disks, async vdiRef => {
// Dont destroy if attached to other (non control domain) VMs
for (const vbdRef of await this.getField('VDI', vdiRef, 'VBDs')) {
const vmRef2 = await this.getField('VBD', vbdRef, 'VM')
if (vmRef2 !== vmRef && !(await this.getField('VM', vmRef2, 'is_control_domain'))) {
return
}
}
await this.VDI_destroy(vdiRef).catch(error => {
warn('VM_destroy: failed to destroy VDI', {
error,
vdiRef,
vmRef,
})
})
}),
])
}
@@ -352,7 +364,14 @@ module.exports = class Vm {
let exportedVmRef, destroySnapshot
if (useSnapshot) {
exportedVmRef = await this.VM_snapshot(vmRef, { cancelToken, name_label: `[XO Export] ${vm.name_label}` })
destroySnapshot = () => ignoreErrors.call(this.VM_destroy(exportedVmRef))
destroySnapshot = () =>
this.VM_destroy(exportedVmRef).catch(error => {
warn('VM_export: failed to destroy snapshot', {
error,
snapshotRef: exportedVmRef,
vmRef,
})
})
$defer.onFailure(destroySnapshot)
} else {
exportedVmRef = vmRef
@@ -475,7 +494,14 @@ module.exports = class Vm {
).filter(_ => _.name_label.startsWith(snapshotNameLabelPrefix))
// be safe: only delete if there was a single match
if (createdSnapshots.length === 1) {
ignoreErrors.call(this.VM_destroy(createdSnapshots[0]))
const snapshotRef = createdSnapshots[0]
this.VM_destroy(snapshotRef).catch(error => {
warn('VM_snapshot: failed to destroy broken snapshot', {
error,
snapshotRef,
vmRef,
})
})
}
throw error
}
@@ -485,7 +511,13 @@ module.exports = class Vm {
tries: 3,
}
).then(extractOpaqueRef)
ignoreErrors.call(this.call('VM.add_tags', ref, 'quiesce'))
this.call('VM.add_tags', ref, 'quiesce').catch(error => {
warn('VM_snapshot: failed to add quiesce tag', {
vmRef,
snapshotRef: ref,
error,
})
})
break
} catch (error) {
const { code } = error

View File

@@ -1,9 +1,36 @@
# ChangeLog
## **5.57.0** (2021-04-01)
## **5.57.1** (2021-04-13)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Enhancements
- [Host/Load-balancer] Add option to disable migration (PR [#5706](https://github.com/vatesfr/xen-orchestra/pull/5706))
- [VM] Don't switch a VM to use [DMC](https://docs.citrix.com/en-us/xencenter/7-1/dmc-about.html) when changing the memory [#4983](https://github.com/vatesfr/xen-orchestra/issues/4983)
### Bug fixes
- [Backup restore] Generate new MAC addresses is disabled by default (PR [#5707](https://github.com/vatesfr/xen-orchestra/pull/5707))
- [Backup] Fix `vm.refresh_snapshots is not a function` error
- [Backup] Fix `cannot read property "length" of undefined` when using _delete first_ [Forum post](https://xcp-ng.org/forum/topic/4440/error-on-delta-backup-cannot-read-property-length-of-undefined)
- [Delta backup] Fix merge task not under corresponding remote and missing merge size in summary [#5708](https://github.com/vatesfr/xen-orchestra/issues/5708)
- [Delta backup restore] Fix incorrect reported size (and speed)
- [Settings/Logs] Correctly hide `pool.listMissingPatches` and `host.stats` errors
- [Plugins] Fix `strict mode: unknown keyword` when configuring some plugins
- Fix `Cannot destructure property 'bypassMacAddressesCheck' of 'undefined'` error which happens on various actions deploying a proxy
- [Proxies] Fix _Force upgrade_ `expect the result to be iterator` error
### Released packages
- @xen-orchestra/xapi 0.6.1
- @xen-orchestra/backups 0.9.3
- xo-server-load-balancer 0.5.0
- xo-server 5.78.4
- xo-web 5.80.1
## **5.57.0** (2021-04-01)
### Highlights
- [Backup] Run backup jobs on different system processes (PR [#5660](https://github.com/vatesfr/xen-orchestra/pull/5660))

View File

@@ -7,15 +7,10 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
[Host/Load-balancer] Add option to disable migration (PR [#5706](https://github.com/vatesfr/xen-orchestra/pull/5706))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Backup restore] Generate new MAC addresses is disabled by default (PR [#5707](https://github.com/vatesfr/xen-orchestra/pull/5707))
- [Backup] Fix `vm.refresh_snapshots is not a function` error
### Packages to release
> Packages will be released in the order they are here, therefore, they should
@@ -33,7 +28,9 @@
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- @xen-orchestra/fs patch
- @xen-orchestra/xapi patch
- xo-server-load-balancer minor
- xo-server patch
- xo-web patch
- @xen-orchestra/backups patch
- @xen-orchestra/backups-cli patch
- @xen-orchestra/mixins minor
- xo-server minor

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

After

Width:  |  Height:  |  Size: 23 KiB

View File

@@ -320,6 +320,7 @@ You can learn more about XenServer [resource management on the Citrix Website](h
:::tip
XCP-ng doesn't limit VMs to 32 vCPU
:::
### VDI live migration
Thanks to Xen Storage Motion, it's easy to move a VM disk from one storage location to another, while the VM is running! This feature can help you migrate from your local storage to a SAN, or just upgrade your SAN without any downtime.
@@ -481,7 +482,7 @@ If you are behind a proxy, please update your `xo-server` configuration to add a
### Notes on patching
- Xen Orchestra won't reboot your hosts automatically. That's your call to choose when to do it.
- Patching doesn't always require rebooting. Check the "Guidance" row: if "restartHost" is displayed, it means you need to reboot to have the patch fully applied (see screenshot below)
- Patching doesn't always require rebooting. Check in the host view if the reboot warning is displayed, it means you need to reboot to have the patch fully applied (see screenshot below)
- XO will install all patches without rebooting: that's not an issue. Even applying patches manually, **it's not mandatory to reboot after each patch**.
![](./assets/xo5patching.png)
@@ -492,9 +493,11 @@ If you are behind a proxy, please update your `xo-server` configuration to add a
As specified in the [documentation](https://xcp-ng.org/docs/requirements.html#pool-requirements) your pool shouldn't consist of hosts from different CPU vendors.
:::
::: warning
- Even with matching CPU vendors, in the case of different CPU models XCP-ng will scale the pool CPU ability to the CPU having the least instructions.
- Even with matching CPU vendors, in the case of different CPU models XCP-ng will scale the pool CPU ability to the CPU having the least instructions.
- All the hosts in a pool must run the same XCP-ng version.
:::
:::
### Creating a pool
First you should add your new host to XOA by going to New > Server as described in [the relevant chapter](manage_infrastructure.md#add-a-host).
@@ -625,4 +628,4 @@ xscontainer-prepare-vm -v <VM_UUID> -u <username>
:::tip
Because "prepare-vm" is not exposed outside of the Dom0 (yet?), we can't use Xen Orchestra to give you a one-click solution as of now.
:::
:::

View File

@@ -1,9 +1,8 @@
{
"devDependencies": {
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
"@babel/register": "^7.0.0",
"babel-core": "^7.0.0-0",
"babel-eslint": "^10.0.1",
"babel-jest": "^26.0.1",
"benchmark": "^2.1.4",
"eslint": "^7.6.0",
@@ -24,7 +23,7 @@
"lint-staged": "^10.2.7",
"lodash": "^4.17.4",
"prettier": "^2.0.5",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"sorted-object": "^2.0.1",
"vuepress": "^1.4.1"
},
@@ -37,7 +36,6 @@
}
},
"jest": {
"collectCoverage": true,
"moduleNameMapper": {
"^(@vates/[^/]+)$": [
"$1/src",
@@ -65,10 +63,7 @@
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"timers": "fake",
"transform": {
"\\.jsx?$": "babel-jest"
}
"timers": "fake"
},
"lint-staged": {
"*.{md,ts,ts}": "prettier --write"

View File

@@ -41,7 +41,7 @@
"cross-env": "^7.0.2",
"execa": "^5.0.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},

View File

@@ -26,7 +26,7 @@
"fs-extra": "^9.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"struct-fu": "^1.2.0",
"uuid": "^8.3.1"
},

View File

@@ -45,7 +45,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"

View File

@@ -46,7 +46,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^7.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^3.0.0",

View File

@@ -53,7 +53,7 @@
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.1.0",
"lodash": "^4.17.19",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"readable-stream": "^3.5.0",
"xo-common": "^0.7.0"
},

View File

@@ -37,7 +37,7 @@
"exec-promise": "^0.7.0",
"inquirer": "^8.0.0",
"ldapts": "^2.2.1",
"promise-toolbox": "^0.18.0"
"promise-toolbox": "^0.19.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -375,10 +375,22 @@ class BackupReportsXoPlugin {
})
}
const failedTasksText = []
const skippedVmsText = []
const successfulVmsText = []
const interruptedVmsText = []
const failedVmsText = {
default: [],
compact: [],
}
const skippedVmsText = {
default: [],
compact: [],
}
const successfulVmsText = {
default: [],
compact: [],
}
const interruptedVmsText = {
default: [],
compact: [],
}
const nagiosText = []
let globalMergeSize = 0
@@ -393,8 +405,10 @@ class BackupReportsXoPlugin {
const { type, id } = taskLog.data ?? {}
if (taskLog.message === 'get SR record' || taskLog.message === 'get remote adapter') {
const text = []
++nFailures
failedTasksText.push(
text.push(
// It will ensure that it will never be in a nested list
''
)
@@ -402,25 +416,29 @@ class BackupReportsXoPlugin {
try {
if (type === 'SR') {
const { name_label: name, uuid } = xo.getObject(id)
failedTasksText.push(`### ${name}`, '', `- **UUID**: ${uuid}`)
text.push(`### ${name}`, '', `- **UUID**: ${uuid}`)
nagiosText.push(`[(${type} failed) ${name} : ${taskLog.result.message} ]`)
} else {
const { name } = await xo.getRemote(id)
failedTasksText.push(`### ${name}`, '', `- **UUID**: ${id}`)
text.push(`### ${name}`, '', `- **UUID**: ${id}`)
nagiosText.push(`[(${type} failed) ${name} : ${taskLog.result.message} ]`)
}
} catch (error) {
logger.warn(error)
failedTasksText.push(`### ${UNKNOWN_ITEM}`, '', `- **UUID**: ${id}`)
text.push(`### ${UNKNOWN_ITEM}`, '', `- **UUID**: ${id}`)
nagiosText.push(`[(${type} failed) ${id} : ${taskLog.result.message} ]`)
}
failedTasksText.push(
text.push(
`- **Type**: ${type}`,
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
...getWarningsMarkdown(taskLog.warnings),
`- **Error**: ${taskLog.result.message}`
)
failedVmsText.default.push(...text)
failedVmsText.compact.push(...text)
continue
}
@@ -537,32 +555,39 @@ class BackupReportsXoPlugin {
if (taskLog.result !== undefined) {
if (taskLog.status === 'skipped') {
++nSkipped
skippedVmsText.push(
const common = [
...text,
`- **Reason**: ${
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: taskLog.result.message
}`
)
}`,
]
skippedVmsText.default.push(...common)
skippedVmsText.compact.push(...common)
nagiosText.push(`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${taskLog.result.message} ]`)
} else {
++nFailures
failedTasksText.push(...text, `- **Error**: ${taskLog.result.message}`)
const common = [...text, `- **Error**: ${taskLog.result.message}`]
failedVmsText.default.push(...common)
failedVmsText.compact.push(...common)
nagiosText.push(`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${taskLog.result.message} ]`)
}
} else {
if (taskLog.status === 'failure') {
++nFailures
failedTasksText.push(...text, ...subText)
failedVmsText.default.push(...text, ...subText)
failedVmsText.compact.push(...text)
nagiosText.push(`[${vm !== undefined ? vm.name_label : 'undefined'}: (failed)[${failedSubTasks.toString()}]]`)
} else if (taskLog.status === 'interrupted') {
++nInterrupted
interruptedVmsText.push(...text, ...subText)
interruptedVmsText.default.push(...text, ...subText)
interruptedVmsText.compact.push(...text)
nagiosText.push(`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`)
} else {
successfulVmsText.push(...text, ...subText)
successfulVmsText.default.push(...text, ...subText)
successfulVmsText.compact.push(...text)
}
}
}
@@ -582,27 +607,37 @@ class BackupReportsXoPlugin {
...getWarningsMarkdown(log.warnings),
'',
]
const slackMarkdown = [...markdown]
if (nFailures !== 0) {
markdown.push('---', '', `## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`, '', ...failedTasksText)
const common = ['---', '', `## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`, '']
markdown.push(...common, ...failedVmsText.default)
slackMarkdown.push(...common, ...failedVmsText.compact)
}
if (nSkipped !== 0) {
markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
const common = ['---', '', `## ${nSkipped} Skipped`, '']
markdown.push(...common, ...skippedVmsText.default)
slackMarkdown.push(...common, ...skippedVmsText.compact)
}
if (nInterrupted !== 0) {
markdown.push('---', '', `## ${nInterrupted} Interrupted`, '', ...interruptedVmsText)
const common = ['---', '', `## ${nInterrupted} Interrupted`, '']
markdown.push(...common, ...interruptedVmsText.default)
slackMarkdown.push(...common, ...interruptedVmsText.compact)
}
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
markdown.push('---', '', `## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`, '', ...successfulVmsText)
const common = ['---', '', `## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`, '']
markdown.push(...common, ...successfulVmsText.default)
slackMarkdown.push(...common, ...successfulVmsText.compact)
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
mailReceivers,
markdown: toMarkdown(markdown),
slackMarkdown: toMarkdown(slackMarkdown),
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
success: log.status === 'success',
nagiosMarkdown:
@@ -614,7 +649,7 @@ class BackupReportsXoPlugin {
})
}
_sendReport({ mailReceivers, markdown, nagiosMarkdown, subject, success }) {
_sendReport({ mailReceivers, markdown, slackMarkdown = markdown, nagiosMarkdown, subject, success }) {
if (mailReceivers === undefined || mailReceivers.length === 0) {
mailReceivers = this._mailsReceivers
}
@@ -634,7 +669,7 @@ class BackupReportsXoPlugin {
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
message: slackMarkdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-load-balancer",
"version": "0.4.0",
"version": "0.5.0",
"license": "AGPL-3.0-or-later",
"description": "Load balancer for XO-Server",
"keywords": [

View File

@@ -35,7 +35,7 @@
"ipaddr.js": "^1.9.1",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.117",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"uuid": "^8.3.1"
},
"private": true,

View File

@@ -35,7 +35,7 @@
"golike-defer": "^0.5.1",
"jest": "^24.8.0",
"lodash": "^4.17.11",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"xo-collection": "^0.4.1",
"xo-common": "^0.7.0",
"xo-lib": "^0.10.1"

View File

@@ -33,7 +33,7 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.18.0"
"promise-toolbox": "^0.19.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -32,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"slack-node": "^0.1.8"
},
"devDependencies": {

View File

@@ -42,7 +42,7 @@
"html-minifier": "^4.0.0",
"human-format": "^0.11.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.18.0"
"promise-toolbox": "^0.19.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -96,6 +96,7 @@ retentionXoMetadata = 0
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
copyRetention = 0
deleteFirst = false
exportRetention = 0
fullInterval = 0
@@ -130,6 +131,8 @@ port = 80
[http.mounts]
'/' = '../xo-web/dist'
[plugins]
[remoteOptions]
mountsDir = '/run/xo-server/mounts'

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.78.2",
"version": "5.78.4",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -40,16 +40,17 @@
"@vates/parse-duration": "0.1.0",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.9.1",
"@xen-orchestra/backups": "^0.9.3",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.14.0",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/mixin": "^0.0.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.0.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.6.0",
"@xen-orchestra/xapi": "^0.6.1",
"ajv": "^8.0.3",
"app-conf": "^0.9.0",
"archiver": "^5.0.0",
@@ -112,7 +113,7 @@
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^26.4.2",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"proxy-agent": "^4.0.0",
"pug": "^3.0.0",
"pump": "^3.0.0",

View File

@@ -49,7 +49,7 @@ createJob.params = {
}
export function getSuggestedExcludedTags() {
return ['Continuous Replication', 'Disaster Recovery', 'XOSAN', this._config['xo-proxy'].vmTag]
return ['Continuous Replication', 'Disaster Recovery', 'XOSAN', this.config.get('xo-proxy.vmTag')]
}
export function migrateLegacyJob({ id }) {

View File

@@ -1028,7 +1028,7 @@ rollingDrCopy.description =
// -------------------------------------------------------------------
export function start({ vm, bypassMacAddressesCheck, force, host }) {
return this.getXapi(vm).startVm(vm._xapiId, host?._xapiId, { bypassMacAddressesCheck, force })
return this.getXapi(vm).startVm(vm._xapiId, { bypassMacAddressesCheck, force, hostId: host?._xapiId })
}
start.params = {

View File

@@ -7,7 +7,7 @@ import createNdJsonStream from '../_createNdJsonStream'
// ===================================================================
export function clean() {
return this.clean()
return this.hooks.clean()
}
clean.permission = 'admin'

View File

@@ -262,7 +262,7 @@ async function setUpPassport(express, xo, { authentication: authCfg, http: { coo
next()
} else {
req.flash('return-url', url)
res.redirect(authCfg.defaultSignInPage)
res.redirect(xo.config.get('authentication.defaultSignInPage'))
}
})
@@ -295,14 +295,15 @@ async function registerPlugin(pluginPath, pluginName) {
let { default: factory = plugin, configurationSchema, configurationPresets, testSchema } = plugin
let instance
const config = this._config
const datadir = this.config.get('datadir')
const pluginsConfig = this.config.get('plugins')
const handleFactory = factory =>
typeof factory === 'function'
? factory({
staticConfig: config.plugins?.[pluginName] ?? {},
staticConfig: pluginsConfig[pluginName] ?? {},
xo: this,
getDataDir: () => {
const dir = `${config.datadir}/${pluginName}`
const dir = `${datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
@@ -493,7 +494,7 @@ const setUpProxies = (express, opts, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
express.on('upgrade', (req, socket, head) => {
const { url } = req
@@ -533,7 +534,7 @@ const setUpApi = (webServer, xo, config) => {
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
const onConnection = (socket, upgradeReq) => {
const { remoteAddress } = upgradeReq.socket
@@ -601,7 +602,7 @@ const setUpConsoleProxy = (webServer, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.hooks.on('stop', () => fromCallback.call(webSocketServer, 'close'))
webServer.on('upgrade', async (req, socket, head) => {
const matches = CONSOLE_PROXY_PATH_RE.exec(req.url)
@@ -730,13 +731,13 @@ export default async function main(args) {
})
// Register web server close on XO stop.
xo.on('stop', () => fromCallback.call(webServer, 'stop'))
xo.hooks.on('stop', () => fromCallback.call(webServer, 'stop'))
// Connects to all registered servers.
await xo.start()
await xo.hooks.start()
// Trigger a clean job.
await xo.clean()
await xo.hooks.clean()
// Express is used to manage non WebSocket connections.
const express = await createExpressApp(config)
@@ -806,11 +807,11 @@ export default async function main(args) {
alreadyCalled = true
log.info(`${signal} caught, closing…`)
xo.stop()
xo.hooks.stop()
})
})
await fromEvent(xo, 'stopped')
await fromEvent(xo.hooks, 'stopped')
log.info('bye :-)')
}

View File

@@ -9,7 +9,8 @@ const _combine = (vectors, n, cb) => {
const m = vector.length
if (n === 1) {
for (let i = 0; i < m; ++i) {
cb([vector[i]]) // eslint-disable-line standard/no-callback-literal
// eslint-disable-next-line node/no-callback-literal
cb([vector[i]])
}
return
}

View File

@@ -25,12 +25,12 @@ xo-server-recover-account <user name or email>
console.log('The generated password is', password)
}
const xo = new Xo(
await appConf.load('xo-server', {
const xo = new Xo({
config: await appConf.load('xo-server', {
appDir: joinPath(__dirname, '..'),
ignoreUnknownFormats: true,
})
)
}),
})
const user = await xo.getUserByName(name, true)
if (user !== null) {

View File

@@ -5,7 +5,7 @@ import concurrency from 'limit-concurrency-decorator'
import deferrable from 'golike-defer'
import fatfs from 'fatfs'
import mapToArray from 'lodash/map'
import mixin from '@xen-orchestra/mixin'
import mixin from '@xen-orchestra/mixin/legacy'
import ms from 'ms'
import synchronized from 'decorator-synchronized'
import tarStream from 'tar-stream'
@@ -1349,7 +1349,7 @@ export default class Xapi extends XapiBase {
return /* await */ this._snapshotVm(this.getObject(vmId), nameLabel)
}
async _startVm(vm, host, { force = false, bypassMacAddressesCheck = force } = {}) {
async _startVm(vm, { force = false, bypassMacAddressesCheck = force, hostId } = {}) {
if (!bypassMacAddressesCheck) {
const vmMacAddresses = vm.$VIFs.map(vif => vif.MAC)
if (new Set(vmMacAddresses).size !== vmMacAddresses.length) {
@@ -1373,19 +1373,19 @@ export default class Xapi extends XapiBase {
await vm.update_blocked_operations('start', null)
}
return host === undefined
return hostId === undefined
? this.call(
'VM.start',
vm.$ref,
false, // Start paused?
false // Skip pre-boot checks?
)
: this.callAsync('VM.start_on', vm.$ref, host.$ref, false, false)
: this.callAsync('VM.start_on', vm.$ref, this.getObject(hostId).$ref, false, false)
}
async startVm(vmId, hostId, { bypassMacAddressesCheck, force }) {
async startVm(vmId, options) {
try {
await this._startVm(this.getObject(vmId), hostId && this.getObject(hostId), { bypassMacAddressesCheck, force })
await this._startVm(this.getObject(vmId), options)
} catch (e) {
if (e.code === 'OPERATION_BLOCKED') {
throw forbiddenOperation('Start', e.params[1])

View File

@@ -340,7 +340,24 @@ export default {
set: 'memory_dynamic_min',
},
memory: 'memoryMax',
_memory: {
addToLimits: true,
get: vm => +vm.memory_dynamic_max,
preprocess: parseSize,
set(memory, vm) {
return vm.$call('set_memory_limits', vm.memory_static_min, memory, memory, memory)
},
},
memory: {
dispatch(vm) {
const dynamicMin = vm.memory_dynamic_min
const useDmc = dynamicMin !== vm.memory_dynamic_max || dynamicMin !== vm.memory_static_max
return useDmc ? 'memoryMax' : '_memory'
},
},
memoryMax: {
addToLimits: true,
limitName: 'memory',

View File

@@ -192,6 +192,10 @@ export const makeEditObject = specs => {
}
})
if ('dispatch' in spec) {
return spec
}
const { get } = spec
if (get) {
spec.get = normalizeGet(get, name)
@@ -235,6 +239,11 @@ export const makeEditObject = specs => {
return
}
const { dispatch } = spec
if (dispatch) {
return set(value, dispatch(object))
}
const { preprocess } = spec
if (preprocess) {
value = preprocess(value)

View File

@@ -7,17 +7,17 @@ import { Acls } from '../models/acl'
// ===================================================================
export default class {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
const aclsDb = (this._acls = new Acls({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:acl',
indexes: ['subject', 'object'],
}))
xo.on('start', () => {
xo.addConfigManager(
app.hooks.on('start', () => {
app.addConfigManager(
'acls',
() => aclsDb.get(),
acls => aclsDb.update(acls),
@@ -25,7 +25,7 @@ export default class {
)
})
xo.on('clean', async () => {
app.hooks.on('clean', async () => {
const acls = await aclsDb.get()
const toRemove = []
forEach(acls, ({ subject, object, action, id }) => {
@@ -39,7 +39,7 @@ export default class {
}
async _getAclsForUser(userId) {
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
const { groups } = user
const subjects = groups ? groups.concat(userId) : [userId]
@@ -97,25 +97,25 @@ export default class {
}
async checkPermissions(userId, permissions) {
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
// Special case for super XO administrators.
if (user.permission === 'admin') {
return true
}
aclResolver.assert(await this.getPermissionsForUser(userId), id => this._xo.getObject(id), permissions)
aclResolver.assert(await this.getPermissionsForUser(userId), id => this._app.getObject(id), permissions)
}
async hasPermissions(userId, permissions) {
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
// Special case for super XO administrators.
if (user.permission === 'admin') {
return true
}
return aclResolver.check(await this.getPermissionsForUser(userId), id => this._xo.getObject(id), permissions)
return aclResolver.check(await this.getPermissionsForUser(userId), id => this._app.getObject(id), permissions)
}
async removeAclsForObject(objectId) {

View File

@@ -143,14 +143,14 @@ async function resolveParams(method, params) {
// -------------------------------------------------------------------
export default class Api {
constructor(xo) {
constructor(app) {
this._logger = null
this._methods = { __proto__: null }
this._xo = xo
this._app = app
this.addApiMethods(methods)
xo.on('start', async () => {
this._logger = await xo.getLogger('api')
app.hooks.on('start', async () => {
this._logger = await app.getLogger('api')
})
}
@@ -216,7 +216,7 @@ export default class Api {
}
async callApiMethod(session, name, params = {}) {
const xo = this._xo
const app = this._app
const startTime = Date.now()
const method = this._methods[name]
@@ -236,11 +236,11 @@ export default class Api {
},
}
let obj = xo
let obj = app
do {
Object.getOwnPropertyNames(obj).forEach(name => {
if (!(name in descriptors)) {
descriptors[name] = getBoundPropertyDescriptor(obj, name, xo)
descriptors[name] = getBoundPropertyDescriptor(obj, name, app)
}
})
} while ((obj = Reflect.getPrototypeOf(obj)) !== null)
@@ -250,7 +250,7 @@ export default class Api {
// Fetch and inject the current user.
const userId = session.get('user_id', undefined)
context.user = userId && (await xo.getUser(userId))
context.user = userId && (await app.getUser(userId))
const userName = context.user ? context.user.email : '(unknown user)'
const data = {
@@ -264,7 +264,7 @@ export default class Api {
}
await emitAsync.call(
xo,
app,
{
onError(error) {
log.warn('xo:preCall listener failure', { error })
@@ -313,13 +313,13 @@ export default class Api {
// it's a special case in which the user is defined at the end of the call
if (data.method === 'session.signIn') {
const { id, email } = await xo.getUser(session.get('user_id'))
const { id, email } = await app.getUser(session.get('user_id'))
data.userId = id
data.userName = email
}
const now = Date.now()
xo.emit('xo:postCall', {
app.emit('xo:postCall', {
...data,
duration: now - data.timestamp,
result,
@@ -331,7 +331,7 @@ export default class Api {
const serializedError = serializeError(error)
const now = Date.now()
xo.emit('xo:postCall', {
app.emit('xo:postCall', {
...data,
duration: now - data.timestamp,
error: serializedError,
@@ -348,7 +348,7 @@ export default class Api {
// 2021-02-11: Work-around: ECONNREFUSED error can be triggered by
// 'host.stats' method because there is no connection to the host during a
// toolstack restart and xo-web may call it often
if (name !== 'pool.listMissingPatches' || name !== 'host.stats') {
if (name !== 'pool.listMissingPatches' && name !== 'host.stats') {
this._logger.error(message, {
...data,
duration: Date.now() - startTime,
@@ -356,7 +356,7 @@ export default class Api {
})
}
if (xo._config.verboseLogsOnErrors) {
if (app.config.get('verboseApiLogsOnErrors')) {
log.warn(message, { error })
} else {
log.warn(`${userName} | ${name}(...) [${ms(Date.now() - startTime)}] =!> ${error}`)
@@ -366,7 +366,7 @@ export default class Api {
if (xoError) {
throw xoError(error.params, ref => {
try {
return xo.getObject(ref).id
return app.getObject(ref).id
} catch (e) {
return ref
}

View File

@@ -14,13 +14,15 @@ const log = createLogger('xo:authentification')
const noSuchAuthenticationToken = id => noSuchObject(id, 'authenticationToken')
export default class {
constructor(xo, { config: { authentication: config } }) {
this._defaultTokenValidity = parseDuration(config.defaultTokenValidity)
this._maxTokenValidity = parseDuration(config.maxTokenValidity)
this._throttlingDelay = parseDuration(config.throttlingDelay)
constructor(app) {
app.config.watch('authentication', config => {
this._defaultTokenValidity = parseDuration(config.defaultTokenValidity)
this._maxTokenValidity = parseDuration(config.maxTokenValidity)
this._throttlingDelay = parseDuration(config.throttlingDelay)
})
this._providers = new Set()
this._xo = xo
this._app = app
// Store last failures by user to throttle tries (slow bruteforce
// attacks).
@@ -28,7 +30,7 @@ export default class {
// Creates persistent collections.
const tokensDb = (this._tokens = new Tokens({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:token',
indexes: ['user_id'],
}))
@@ -39,12 +41,12 @@ export default class {
return
}
const user = await xo.getUserByName(username, true)
if (user && (await xo.checkUserPassword(user.id, password))) {
const user = await app.getUserByName(username, true)
if (user && (await app.checkUserPassword(user.id, password))) {
return { userId: user.id }
}
xo.emit('xo:audit', 'signInFailed', {
app.emit('xo:audit', 'signInFailed', {
userId: user?.id,
userName: username,
userIp: ip,
@@ -58,12 +60,12 @@ export default class {
}
try {
const token = await xo.getAuthenticationToken(tokenId)
const token = await app.getAuthenticationToken(tokenId)
return { expiration: token.expiration, userId: token.user_id }
} catch (error) {}
})
xo.on('clean', async () => {
app.hooks.on('clean', async () => {
const tokens = await tokensDb.get()
const toRemove = []
const now = Date.now()
@@ -76,8 +78,8 @@ export default class {
return tokensDb.rebuildIndexes()
})
xo.on('start', () => {
xo.addConfigManager(
app.hooks.on('start', () => {
app.addConfigManager(
'authTokens',
() => tokensDb.get(),
tokens => tokensDb.update(tokens)
@@ -114,7 +116,7 @@ export default class {
const { userId, username, expiration } = result
return {
user: await (userId !== undefined ? this._xo.getUser(userId) : this._xo.registerUser(undefined, username)),
user: await (userId !== undefined ? this._app.getUser(userId) : this._app.registerUser(undefined, username)),
expiration,
}
} catch (error) {

View File

@@ -59,6 +59,7 @@ const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
// jobId?: string,
// jobName?: string,
// message?: 'backup' | 'metadataRestore' | 'restore',
// proxyId?: string,
// scheduleId?: string,
// start: number,
// status: 'pending' | 'failure' | 'interrupted' | 'skipped' | 'success',
@@ -88,6 +89,7 @@ export default {
jobId,
jobName: data.jobName,
message: 'backup',
proxyId: data.proxyId,
scheduleId,
start: time,
status: runningJobs[jobId] === id ? 'pending' : 'interrupted',

View File

@@ -11,7 +11,6 @@ import { formatVmBackups } from '@xen-orchestra/backups/formatVmBackups'
import { forOwn, merge } from 'lodash'
import { ImportVmBackup } from '@xen-orchestra/backups/ImportVmBackup'
import { invalidParameters } from 'xo-common/api-errors'
import { parseDuration } from '@vates/parse-duration'
import { runBackupWorker } from '@xen-orchestra/backups/runBackupWorker'
import { Task } from '@xen-orchestra/backups/Task'
import { type Pattern, createPredicate } from 'value-matcher'
@@ -215,16 +214,17 @@ export default class BackupNg {
return this._runningRestores
}
constructor(app: any, { config }) {
constructor(app: any) {
this._app = app
this._logger = undefined
this._runningRestores = new Set()
this._backupOptions = config.backups
app.on('start', async () => {
app.hooks.on('start', async () => {
this._logger = await app.getLogger('restore')
const executor: Executor = async ({ cancelToken, data, job: job_, logger, runJobId, schedule }) => {
const backupsConfig = app.config.get('backups')
let job: BackupJob = (job_: any)
const vmsPattern = job.vms
@@ -256,7 +256,7 @@ export default class BackupNg {
const proxyId = job.proxy
const remoteIds = unboxIdsFromPattern(job.remotes)
try {
if (proxyId === undefined && config.backups.disableWorkers) {
if (proxyId === undefined && backupsConfig.disableWorkers) {
const localTaskIds = { __proto__: null }
return await Task.run(
{
@@ -270,7 +270,7 @@ export default class BackupNg {
},
() =>
new Backup({
config: config.backups,
config: backupsConfig,
getAdapter: async remoteId =>
app.getBackupsRemoteAdapter(await app.getRemoteWithCredentials(remoteId)),
@@ -363,10 +363,10 @@ export default class BackupNg {
const localTaskIds = { __proto__: null }
return await runBackupWorker(
{
config: config.backups,
remoteOptions: config.remoteOptions,
resourceCacheDelay: parseDuration(config.resourceCacheDelay),
xapiOptions: config.xapiOptions,
config: backupsConfig,
remoteOptions: app.config.get('remoteOptions'),
resourceCacheDelay: app.config.getDuration('resourceCacheDelay'),
xapiOptions: app.config.get('xapiOptions'),
...params,
},
log =>
@@ -546,7 +546,7 @@ export default class BackupNg {
@decorateWith(
debounceWithKey,
function () {
return parseDuration(this._backupOptions.listingDebounce)
return this._app.config.getDuration('backups.listingDebounce')
},
function keyFn(remoteId) {
return [this, remoteId]
@@ -576,7 +576,7 @@ export default class BackupNg {
// inject the remote id on the backup which is needed for importVmBackupNg()
forOwn(backupsByVm, backups =>
backups.forEach(backup => {
backup.id = `${remoteId}${backup.id}`
backup.id = `${remoteId}/${backup.id}`
})
)
return backupsByVm

View File

@@ -6,9 +6,8 @@ import { deduped } from '@vates/disposable/deduped'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter'
export default class BackupsRemoteAdapter {
constructor(app, { config: { backups } }) {
constructor(app) {
this._app = app
this._config = backups
}
// FIXME: invalidate cache on remote option change
@@ -21,7 +20,7 @@ export default class BackupsRemoteAdapter {
const app = this._app
return new RemoteAdapter(await app.getRemoteHandler(remote), {
debounceResource: app.debounceResource.bind(app),
dirMode: this._config.dirMode,
dirMode: app.config.get('backups.dirMode'),
})
}
}

View File

@@ -259,15 +259,15 @@ const mountLvmPv = (device, partition) => {
// ===================================================================
export default class {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
}
@decorateWith(debounceWithKey, DEBOUNCE_DELAY, function keyFn(remoteId) {
return [this, remoteId]
})
async listRemoteBackups(remoteId) {
const handler = await this._xo.getRemoteHandler(remoteId)
const handler = await this._app.getRemoteHandler(remoteId)
// List backups. (No delta)
const backupFilter = file => file.endsWith('.xva')
@@ -296,7 +296,7 @@ export default class {
return [this, remoteId]
})
async listVmBackups(remoteId) {
const handler = await this._xo.getRemoteHandler(remoteId)
const handler = await this._app.getRemoteHandler(remoteId)
const backups = []
@@ -329,9 +329,9 @@ export default class {
}
async importVmBackup(remoteId, file, sr) {
const handler = await this._xo.getRemoteHandler(remoteId)
const handler = await this._app.getRemoteHandler(remoteId)
const stream = await handler.createReadStream(file)
const xapi = this._xo.getXapi(sr)
const xapi = this._app.getXapi(sr)
const vm = await xapi.importVm(stream, { srId: sr._xapiId })
@@ -351,8 +351,8 @@ export default class {
@deferrable
async deltaCopyVm($defer, srcVm, targetSr, force = false, retention = 1) {
const transferStart = Date.now()
const srcXapi = this._xo.getXapi(srcVm)
const targetXapi = this._xo.getXapi(targetSr)
const srcXapi = this._app.getXapi(srcVm)
const targetXapi = this._app.getXapi(targetSr)
// Get Xen objects from XO objects.
const { uuid } = (srcVm = srcXapi.getObject(srcVm._xapiId))
@@ -374,7 +374,7 @@ export default class {
bypassVdiChainsCheck: force,
snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`,
})
$defer.onFailure(() => this._xo.getXapiObject(delta.vm.uuid).$destroy())
$defer.onFailure(() => this._app.getXapiObject(delta.vm.uuid).$destroy())
$defer.onFailure(cancel)
const date = safeDateFormat(Date.now())
@@ -400,7 +400,7 @@ export default class {
// Once done, (asynchronously) remove the (now obsolete) local
// base.
if (localBaseUuid) {
promise.then(() => this._xo.getXapiObject(localBaseUuid).$destroy())::ignoreErrors()
promise.then(() => this._app.getXapiObject(localBaseUuid).$destroy())::ignoreErrors()
}
if (toRemove !== undefined) {
@@ -579,8 +579,8 @@ export default class {
@deferrable
async rollingDeltaVmBackup($defer, { vm, remoteId, tag, retention }) {
const transferStart = Date.now()
const handler = await this._xo.getRemoteHandler(remoteId)
const xapi = this._xo.getXapi(vm)
const handler = await this._app.getRemoteHandler(remoteId)
const xapi = this._app.getXapi(vm)
vm = xapi.getObject(vm._xapiId)
@@ -729,8 +729,8 @@ export default class {
filePath = `${filePath}${DELTA_BACKUP_EXT}`
const { datetime } = parseVmBackupPath(filePath)
const handler = await this._xo.getRemoteHandler(remoteId)
const xapi = this._xo.getXapi(sr || mapVdisSrs[getFirstPropertyName(mapVdisSrs)])
const handler = await this._app.getRemoteHandler(remoteId)
const xapi = this._app.getXapi(sr || mapVdisSrs[getFirstPropertyName(mapVdisSrs)])
const delta = JSON.parse(await handler.readFile(filePath))
let vm
@@ -776,7 +776,7 @@ export default class {
// -----------------------------------------------------------------
async backupVm({ vm, remoteId, file, compress }) {
const handler = await this._xo.getRemoteHandler(remoteId)
const handler = await this._app.getRemoteHandler(remoteId)
return this._backupVm(vm, handler, file, { compress })
}
@@ -786,7 +786,7 @@ export default class {
$defer.onFailure.call(handler, 'unlink', file)
$defer.onFailure.call(targetStream, 'close')
const sourceStream = await this._xo.getXapi(vm).exportVm(vm._xapiId, {
const sourceStream = await this._app.getXapi(vm).exportVm(vm._xapiId, {
compress,
})
@@ -803,7 +803,7 @@ export default class {
async rollingBackupVm({ vm, remoteId, tag, retention, compress }) {
const transferStart = Date.now()
const handler = await this._xo.getRemoteHandler(remoteId)
const handler = await this._app.getRemoteHandler(remoteId)
const files = await handler.list('.')
@@ -821,7 +821,7 @@ export default class {
}
async rollingSnapshotVm(vm, tag, retention) {
const xapi = this._xo.getXapi(vm)
const xapi = this._app.getXapi(vm)
vm = xapi.getObject(vm._xapiId)
await xapi.VM_assertHealthyVdiChains(vm.$ref)
@@ -857,9 +857,9 @@ export default class {
tag = 'DR_' + tag
const reg = new RegExp('^' + escapeRegExp(`${vm.name_label}_${tag}_`) + '[0-9]{8}T[0-9]{6}Z$')
const targetXapi = this._xo.getXapi(sr)
const targetXapi = this._app.getXapi(sr)
sr = targetXapi.getObject(sr._xapiId)
const sourceXapi = this._xo.getXapi(vm)
const sourceXapi = this._app.getXapi(vm)
vm = sourceXapi.getObject(vm._xapiId)
const vms = {}
@@ -907,7 +907,7 @@ export default class {
// -----------------------------------------------------------------
_mountVhd(remoteId, vhdPath) {
return Promise.all([this._xo.getRemoteHandler(remoteId), tmpDir()]).then(([handler, mountDir]) => {
return Promise.all([this._app.getRemoteHandler(remoteId), tmpDir()]).then(([handler, mountDir]) => {
if (!handler._getRealPath) {
throw new Error(`this remote is not supported`)
}

View File

@@ -33,8 +33,8 @@ export default class {
prefix: 'xo:cloudConfig',
}))
app.on('clean', () => db.rebuildIndexes())
app.on('start', () =>
app.hooks.on('clean', () => db.rebuildIndexes())
app.hooks.on('start', () =>
app.addConfigManager(
'cloudConfigs',
() => db.get(),

View File

@@ -28,7 +28,7 @@ export default class BackupNgFileRestore {
// clean any LVM volumes that might have not been properly
// unmounted
app.on('start', async () => {
app.hooks.on('start', async () => {
await Promise.all([execa('losetup', ['-D']), execa('vgchange', ['-an'])])
await execa('pvscan', ['--cache'])
})

View File

@@ -1,53 +0,0 @@
import emitAsync from '@xen-orchestra/emit-async'
import { createLogger } from '@xen-orchestra/log'
const log = createLogger('xo:xo-mixins:hooks')
const makeSingletonHook = (hook, postEvent) => {
let promise
return function () {
if (promise === undefined) {
promise = runHook(this, hook)
promise.then(() => {
this.removeAllListeners(hook)
this.emit(postEvent)
this.removeAllListeners(postEvent)
})
}
return promise
}
}
const runHook = (app, hook) => {
log.debug(`${hook} start…`)
const promise = emitAsync.call(
app,
{
onError: error => log.warn(`hook ${hook} failure:`, { error }),
},
hook
)
promise.then(() => {
log.debug(`${hook} finished`)
})
return promise
}
export default {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.
clean() {
return runHook(this, 'clean')
},
// Run *start* async listeners.
//
// They initialize the application.
start: makeSingletonHook('start', 'started'),
// Run *stop* async listeners.
//
// They close connections, unmount file systems, save states, etc.
stop: makeSingletonHook('stop', 'stopped'),
}

View File

@@ -35,14 +35,14 @@ const _isAddressInIpPool = (address, network, ipPool) =>
// Note: an address cannot be in two different pools sharing a
// network.
export default class IpPools {
constructor(xo) {
constructor(app) {
this._store = null
this._xo = xo
this._app = app
xo.on('start', async () => {
this._store = await xo.getStore('ipPools')
app.hooks.on('start', async () => {
this._store = await app.getStore('ipPools')
xo.addConfigManager(
app.addConfigManager(
'ipPools',
() => this.getAllIpPools(),
ipPools => Promise.all(ipPools.map(ipPool => this._save(ipPool)))
@@ -68,9 +68,9 @@ export default class IpPools {
if (await store.has(id)) {
await Promise.all(
(await this._xo.getAllResourceSets()).map(async set => {
await this._xo.removeLimitFromResourceSet(`ipPool:${id}`, set.id)
return this._xo.removeIpPoolFromResourceSet(id, set.id)
(await this._app.getAllResourceSets()).map(async set => {
await this._app.removeLimitFromResourceSet(`ipPool:${id}`, set.id)
return this._app.removeIpPoolFromResourceSet(id, set.id)
})
)
await this._removeIpAddressesFromVifs(mapValues((await this.getIpPool(id)).addresses, 'vifs'))
@@ -91,9 +91,9 @@ export default class IpPools {
async getAllIpPools(userId) {
let filter
if (userId != null) {
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
if (user.permission !== 'admin') {
const resourceSets = await this._xo.getAllResourceSets(userId)
const resourceSets = await this._app.getAllResourceSets(userId)
const ipPools = lightSet(flatten(resourceSets.map(_ => _.ipPools)))
filter = ({ id }) => ipPools.has(id)
}
@@ -120,7 +120,7 @@ export default class IpPools {
const vifs = vm.VIFs
const ipPools = []
for (const vifId of vifs) {
const { allowedIpv4Addresses, allowedIpv6Addresses, $network } = this._xo.getObject(vifId)
const { allowedIpv4Addresses, allowedIpv6Addresses, $network } = this._app.getObject(vifId)
for (const address of concat(allowedIpv4Addresses, allowedIpv6Addresses)) {
const ipPool = await this._getAddressIpPool(address, $network)
@@ -136,8 +136,8 @@ export default class IpPools {
const updatedIpPools = {}
const limits = {}
const xoVif = this._xo.getObject(vifId)
const xapi = this._xo.getXapi(xoVif)
const xoVif = this._app.getObject(vifId)
const xapi = this._app.getXapi(xoVif)
const vif = xapi.getObject(xoVif._xapiId)
const allocAndSave = (() => {
@@ -146,7 +146,7 @@ export default class IpPools {
return () => {
const saveIpPools = () => Promise.all(mapToArray(updatedIpPools, ipPool => this._save(ipPool)))
return resourseSetId
? this._xo.allocateLimitsInResourceSet(limits, resourseSetId).then(saveIpPools)
? this._app.allocateLimitsInResourceSet(limits, resourseSetId).then(saveIpPools)
: saveIpPools()
}
})()
@@ -209,13 +209,13 @@ export default class IpPools {
})
})
const { getXapi } = this._xo
const { getXapi } = this._app
return Promise.all(
mapToArray(mapVifAddresses, (addresses, vifId) => {
let vif
try {
// The IP may not have been correctly deallocated from the IP pool when the VIF was deleted
vif = this._xo.getObject(vifId)
vif = this._app.getObject(vifId)
} catch (error) {
return
}

View File

@@ -134,11 +134,11 @@ export default class Jobs {
return this._runningJobs
}
constructor(xo: any) {
this._app = xo
constructor(app: any) {
this._app = app
const executors = (this._executors = { __proto__: null })
const jobsDb = (this._jobs = new JobsDb({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:job',
indexes: ['user_id', 'key'],
}))
@@ -148,11 +148,11 @@ export default class Jobs {
executors.call = executeCall
xo.on('clean', () => jobsDb.rebuildIndexes())
xo.on('start', async () => {
this._logger = await xo.getLogger('jobs')
app.hooks.on('clean', () => jobsDb.rebuildIndexes())
app.hooks.on('start', async () => {
this._logger = await app.getLogger('jobs')
xo.addConfigManager(
app.addConfigManager(
'jobs',
() => jobsDb.get(),
jobs => Promise.all(jobs.map(job => jobsDb.save(job))),
@@ -160,14 +160,14 @@ export default class Jobs {
)
})
// it sends a report for the interrupted backup jobs
xo.on('plugins:registered', () =>
app.on('plugins:registered', () =>
asyncMapSettled(this._jobs.get(), job => {
// only the interrupted backup jobs have the runId property
if (job.runId === undefined) {
return
}
xo.emit(
app.emit(
'job:terminated',
// This cast can be removed after merging the PR: https://github.com/vatesfr/xen-orchestra/pull/3209
String(job.runId),

View File

@@ -6,7 +6,7 @@ export default class Logs {
constructor(app) {
this._app = app
app.on('clean', () => this._gc())
app.hooks.on('clean', () => this._gc())
}
async _gc(keep = 2e4) {

View File

@@ -4,7 +4,6 @@ import cloneDeep from 'lodash/cloneDeep'
import Disposable from 'promise-toolbox/Disposable'
import { Backup } from '@xen-orchestra/backups/Backup'
import { createLogger } from '@xen-orchestra/log'
import { parseDuration } from '@vates/parse-duration'
import { parseMetadataBackupId } from '@xen-orchestra/backups/parseMetadataBackupId'
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup'
import { Task } from '@xen-orchestra/backups/Task'
@@ -94,17 +93,16 @@ export default class metadataBackup {
return this._runningMetadataRestores
}
constructor(app: any, { config: { backups } }) {
constructor(app: any) {
this._app = app
this._backupOptions = backups
this._logger = undefined
this._runningMetadataRestores = new Set()
const debounceDelay = parseDuration(backups.listingDebounce)
const debounceDelay = app.config.getDuration('backups.listingDebounce')
this._listXoMetadataBackups = debounceWithKey(this._listXoMetadataBackups, debounceDelay, remoteId => remoteId)
this._listPoolMetadataBackups = debounceWithKey(this._listPoolMetadataBackups, debounceDelay, remoteId => remoteId)
app.on('start', async () => {
app.hooks.on('start', async () => {
this._logger = await app.getLogger('metadataRestore')
app.registerJobExecutor(METADATA_BACKUP_JOB_TYPE, this._executor.bind(this))
@@ -204,7 +202,7 @@ export default class metadataBackup {
},
() =>
new Backup({
config: this._backupOptions,
config: this._app.config.get('backups'),
getAdapter: async remoteId => app.getBackupsRemoteAdapter(await app.getRemoteWithCredentials(remoteId)),
// `@xen-orchestra/backups/Backup` expect that `getConnectedRecord` returns a promise

View File

@@ -1,13 +1,13 @@
import { differenceBy } from 'lodash'
export default class {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
}
getPatchesDifference(hostA, hostB) {
const patchesA = this._xo.getObject(hostA).patches.map(patchId => this._xo.getObject(patchId))
const patchesB = this._xo.getObject(hostB).patches.map(patchId => this._xo.getObject(patchId))
const patchesA = this._app.getObject(hostA).patches.map(patchId => this._app.getObject(patchId))
const patchesB = this._app.getObject(hostB).patches.map(patchId => this._app.getObject(patchId))
return differenceBy(patchesA, patchesB, 'name').map(patch => patch.name)
}

View File

@@ -11,19 +11,20 @@ import { PluginsMetadata } from '../models/plugin-metadata'
const log = createLogger('xo:xo-mixins:plugins')
export default class {
constructor(xo) {
constructor(app) {
this._ajv = new Ajv({
strict: 'log',
useDefaults: true,
})
}).addVocabulary(['$type', 'enumNames'])
this._plugins = { __proto__: null }
this._pluginsMetadata = new PluginsMetadata({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:plugin-metadata',
})
xo.on('start', () => {
xo.addConfigManager(
app.hooks.on('start', () => {
app.addConfigManager(
'plugins',
() => this._pluginsMetadata.get(),
plugins => Promise.all(plugins.map(plugin => this._pluginsMetadata.save(plugin)))

View File

@@ -1,20 +1,20 @@
import { difference, flatten, isEmpty, uniq } from 'lodash'
export default class Pools {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
}
async mergeInto({ sources: sourceIds, target, force }) {
const { _xo } = this
const targetHost = _xo.getObject(target.master)
const { _app } = this
const targetHost = _app.getObject(target.master)
const sources = []
const sourcePatches = {}
// Check hosts compatibility.
for (const sourceId of sourceIds) {
const source = _xo.getObject(sourceId)
const sourceHost = _xo.getObject(source.master)
const source = _app.getObject(sourceId)
const sourceHost = _app.getObject(source.master)
if (sourceHost.productBrand !== targetHost.productBrand) {
throw new Error(`a ${sourceHost.productBrand} pool cannot be merged into a ${targetHost.productBrand} pool`)
}
@@ -27,16 +27,16 @@ export default class Pools {
// Find missing patches on the target.
const targetRequiredPatches = uniq(
flatten(await Promise.all(sources.map(({ master }) => _xo.getPatchesDifference(master, target.master))))
flatten(await Promise.all(sources.map(({ master }) => _app.getPatchesDifference(master, target.master))))
)
// Find missing patches on the sources.
const allRequiredPatches = targetRequiredPatches.concat(
targetHost.patches.map(patchId => _xo.getObject(patchId).name)
targetHost.patches.map(patchId => _app.getObject(patchId).name)
)
const sourceRequiredPatches = {}
for (const sourceId of sourceIds) {
const _sourcePatches = sourcePatches[sourceId].map(patchId => _xo.getObject(patchId).name)
const _sourcePatches = sourcePatches[sourceId].map(patchId => _app.getObject(patchId).name)
const requiredPatches = difference(allRequiredPatches, _sourcePatches)
if (requiredPatches.length > 0) {
sourceRequiredPatches[sourceId] = requiredPatches
@@ -50,9 +50,9 @@ export default class Pools {
// Find patches in parallel.
const findPatchesPromises = []
const sourceXapis = {}
const targetXapi = _xo.getXapi(target)
const targetXapi = _app.getXapi(target)
for (const sourceId of sourceIds) {
const sourceXapi = (sourceXapis[sourceId] = _xo.getXapi(sourceId))
const sourceXapi = (sourceXapis[sourceId] = _app.getXapi(sourceId))
findPatchesPromises.push(sourceXapi.findPatches(sourceRequiredPatches[sourceId] ?? []))
}
const patchesName = await Promise.all([targetXapi.findPatches(targetRequiredPatches), ...findPatchesPromises])
@@ -78,7 +78,7 @@ export default class Pools {
// Merge the sources into the target sequentially to be safe.
for (const source of sources) {
await _xo.mergeXenPools(source._xapiId, target._xapiId, force)
await _app.mergeXenPools(source._xapiId, target._xapiId, force)
}
}
}

View File

@@ -39,22 +39,23 @@ const assertProxyAddress = (proxy, address) => {
}
export default class Proxy {
constructor(app, { config: conf }) {
constructor(app) {
this._app = app
const xoProxyConf = (this._xoProxyConf = conf['xo-proxy'])
const rules = {
'{date}': (date = new Date()) => date.toISOString(),
}
this._generateDefaultProxyName = compileTemplate(xoProxyConf.proxyName, rules)
this._generateDefaultVmName = compileTemplate(xoProxyConf.vmName, rules)
app.config.watch('xo-proxy', xoProxyConf => {
this._generateDefaultProxyName = compileTemplate(xoProxyConf.proxyName, rules)
this._generateDefaultVmName = compileTemplate(xoProxyConf.vmName, rules)
})
const db = (this._db = new Collection({
connection: app._redis,
indexes: ['address', 'vmUuid'],
prefix: 'xo:proxy',
}))
app.on('clean', () => db.rebuildIndexes())
app.on('start', () =>
app.hooks.on('clean', () => db.rebuildIndexes())
app.hooks.on('start', () =>
app.addConfigManager(
'proxies',
() => db.get(),
@@ -97,7 +98,7 @@ export default class Proxy {
await this._app
.unbindLicense({
boundObjectId: vmUuid,
productId: this._xoProxyConf.licenseProductId,
productId: this._app.config.get('xo-proxy.licenseProductId'),
})
.catch(log.warn)
}
@@ -166,7 +167,7 @@ export default class Proxy {
xenstoreData['vm-data/xoa-updater-proxy-url'] = JSON.stringify(httpProxy)
}
if (upgrade) {
xenstoreData['vm-data/xoa-updater-channel'] = JSON.stringify(this._xoProxyConf.channel)
xenstoreData['vm-data/xoa-updater-channel'] = JSON.stringify(this._app.config.get('xo-proxy.channel'))
}
const { vmUuid } = await this._getProxy(id)
@@ -193,7 +194,7 @@ export default class Proxy {
@defer
async _createProxyVm($defer, srId, licenseId, { httpProxy, networkId, networkConfiguration }) {
const app = this._app
const xoProxyConf = this._xoProxyConf
const xoProxyConf = app.config.get('xo-proxy')
const namespace = xoProxyConf.namespace
const {
@@ -261,7 +262,7 @@ export default class Proxy {
async deployProxy(srId, licenseId, { httpProxy, networkConfiguration, networkId, proxyId } = {}) {
const app = this._app
const xoProxyConf = this._xoProxyConf
const xoProxyConf = app.config.get('xo-proxy')
const redeploy = proxyId !== undefined
if (redeploy) {
@@ -356,7 +357,7 @@ export default class Proxy {
pathname: '/api/v1',
protocol: 'https:',
rejectUnauthorized: false,
timeout: parseDuration(this._xoProxyConf.callTimeout),
timeout: this._app.config.getDuration('xo-proxy.callTimeout'),
}
if (proxy.vmUuid !== undefined) {

View File

@@ -17,20 +17,19 @@ const obfuscateRemote = ({ url, ...remote }) => {
}
export default class {
constructor(xo, { config: { remoteOptions } }) {
constructor(app) {
this._handlers = { __proto__: null }
this._remoteOptions = remoteOptions
this._remotes = new Remotes({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:remote',
indexes: ['enabled'],
})
this._remotesInfo = {}
this._xo = xo
this._app = app
xo.on('clean', () => this._remotes.rebuildIndexes())
xo.on('start', async () => {
xo.addConfigManager(
app.hooks.on('clean', () => this._remotes.rebuildIndexes())
app.hooks.on('start', async () => {
app.addConfigManager(
'remotes',
() => this._remotes.get(),
remotes => Promise.all(remotes.map(remote => this._remotes.update(remote)))
@@ -41,7 +40,7 @@ export default class {
ignoreErrors.call(this.updateRemote(remote.id, {}))
})
})
xo.on('stop', async () => {
app.hooks.on('stop', async () => {
const handlers = this._handlers
for (const id in handlers) {
try {
@@ -69,7 +68,7 @@ export default class {
const handlers = this._handlers
let handler = handlers[id]
if (handler === undefined) {
handler = getHandler(remote, this._remoteOptions)
handler = getHandler(remote, this._app.config.get('remoteOptions'))
try {
await handler.sync()
@@ -90,7 +89,7 @@ export default class {
const { readRate, writeRate, ...answer } =
remote.proxy !== undefined
? await this._xo.callProxyMethod(remote.proxy, 'remote.test', {
? await this._app.callProxyMethod(remote.proxy, 'remote.test', {
remote,
})
: await this.getRemoteHandler(remoteId).then(handler => handler.test())
@@ -126,7 +125,7 @@ export default class {
const promise =
remote.proxy !== undefined
? this._xo.callProxyMethod(remote.proxy, 'remote.getInfo', {
? this._app.callProxyMethod(remote.proxy, 'remote.getInfo', {
remote,
})
: this.getRemoteHandler(remote.id).then(handler => handler.getInfo())

View File

@@ -62,19 +62,19 @@ const normalize = set => ({
// ===================================================================
export default class {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
this._store = null
xo.on('start', async () => {
xo.addConfigManager(
app.hooks.on('start', async () => {
app.addConfigManager(
'resourceSets',
() => this.getAllResourceSets(),
resourceSets => Promise.all(resourceSets.map(resourceSet => this._save(resourceSet))),
['groups', 'users']
)
this._store = await xo.getStore('resourceSets')
this._store = await app.getStore('resourceSets')
})
}
@@ -93,7 +93,7 @@ export default class {
async checkResourceSetConstraints(id, userId, objectIds) {
const set = await this.getResourceSet(id)
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
if (
(user.permission !== 'admin' &&
// The set does not contains ANY subjects related to this user
@@ -109,13 +109,13 @@ export default class {
async computeVmResourcesUsage(vm) {
return Object.assign(
computeVmXapiResourcesUsage(this._xo.getXapi(vm).getObject(vm._xapiId)),
await this._xo.computeVmIpPoolsUsage(vm)
computeVmXapiResourcesUsage(this._app.getXapi(vm).getObject(vm._xapiId)),
await this._app.computeVmIpPoolsUsage(vm)
)
}
async computeVmSnapshotResourcesUsage(snapshot) {
if (this._xo._config.selfService?.ignoreVmSnapshotResources) {
if (this._app.config.get('selfService.ignoreVmSnapshotResources')) {
return {}
}
return this.computeVmResourcesUsage(snapshot)
@@ -145,7 +145,9 @@ export default class {
if (await store.has(id)) {
await Promise.all(
mapToArray(this._xo.getObjects({ filter: { resourceSet: id } }), vm => this.setVmResourceSet(vm.id, null, true))
mapToArray(this._app.getObjects({ filter: { resourceSet: id } }), vm =>
this.setVmResourceSet(vm.id, null, true)
)
)
return store.del(id)
}
@@ -167,12 +169,12 @@ export default class {
await Promise.all(
difference(set.subjects, subjects).map(async subjectId =>
Promise.all(
(await this._xo.getAclsForSubject(subjectId)).map(async acl => {
(await this._app.getAclsForSubject(subjectId)).map(async acl => {
try {
const object = this._xo.getObject(acl.object)
const object = this._app.getObject(acl.object)
if ((object.type === 'VM' || object.type === 'VM-snapshot') && object.resourceSet === id) {
await this._xo.removeAcl(subjectId, acl.object, acl.action)
$defer.onFailure(() => this._xo.addAcl(subjectId, acl.object, acl.action))
await this._app.removeAcl(subjectId, acl.object, acl.action)
$defer.onFailure(() => this._app.addAcl(subjectId, acl.object, acl.action))
}
} catch (error) {
if (!noSuchObject.is(error)) {
@@ -219,7 +221,7 @@ export default class {
async getAllResourceSets(userId = undefined) {
let filter
if (userId != null) {
const user = await this._xo.getUser(userId)
const user = await this._app.getUser(userId)
if (user.permission !== 'admin') {
const userHasSubject = lightSet(user.groups).add(user.id).has
filter = set => some(set.subjects, userHasSubject)
@@ -341,7 +343,7 @@ export default class {
})
await Promise.all(
mapToArray(this._xo.getAllXapis(), xapi =>
mapToArray(this._app.getAllXapis(), xapi =>
Promise.all(
mapToArray(xapi.objects.all, async object => {
let id
@@ -358,7 +360,7 @@ export default class {
}
const { limits } = set
forEach(await this.computeResourcesUsage(this._xo.getObject(object.$id)), (usage, resource) => {
forEach(await this.computeResourcesUsage(this._app.getObject(object.$id)), (usage, resource) => {
const limit = limits[resource]
if (limit) {
limit.available -= usage
@@ -374,14 +376,14 @@ export default class {
@deferrable
async setVmResourceSet($defer, vmId, resourceSetId, force = false) {
const xapi = this._xo.getXapi(vmId)
const xapi = this._app.getXapi(vmId)
const previousResourceSetId = xapi.xo.getData(vmId, 'resourceSet')
if (resourceSetId === previousResourceSetId || (previousResourceSetId === undefined && resourceSetId === null)) {
return
}
const resourcesUsage = await this.computeResourcesUsage(this._xo.getObject(vmId))
const resourcesUsage = await this.computeResourcesUsage(this._app.getObject(vmId))
if (resourceSetId != null) {
await this.allocateLimitsInResourceSet(resourcesUsage, resourceSetId, force)
@@ -399,7 +401,7 @@ export default class {
)
if (previousResourceSetId !== undefined) {
await this._xo.removeAclsForObject(vmId)
await this._app.removeAclsForObject(vmId)
}
if (resourceSetId != null) {
await this.shareVmResourceSet(vmId)
@@ -407,13 +409,13 @@ export default class {
}
async shareVmResourceSet(vmId) {
const xapi = this._xo.getXapi(vmId)
const xapi = this._app.getXapi(vmId)
const resourceSetId = xapi.xo.getData(vmId, 'resourceSet')
if (resourceSetId === undefined) {
throw new Error('the vm is not in a resource set')
}
const { subjects } = await this.getResourceSet(resourceSetId)
await asyncMapSettled(subjects, subject => this._xo.addAcl(subject, vmId, 'admin'))
await asyncMapSettled(subjects, subject => this._app.addAcl(subject, vmId, 'admin'))
}
}

View File

@@ -60,7 +60,7 @@ export default class Scheduling {
this._runs = { __proto__: null }
app.on('clean', async () => {
app.hooks.on('clean', async () => {
const [jobsById, schedules] = await Promise.all([
app.getAllJobs().then(_ => keyBy(_, 'id')),
app.getAllSchedules(),
@@ -70,7 +70,7 @@ export default class Scheduling {
return db.rebuildIndexes()
})
app.on('start', async () => {
app.hooks.on('start', async () => {
app.addConfigManager(
'schedules',
() => db.get(),
@@ -85,7 +85,7 @@ export default class Scheduling {
const schedules = await this.getAllSchedules()
schedules.forEach(schedule => this._start(schedule))
})
app.on('stop', () => {
app.hooks.on('stop', () => {
const runs = this._runs
Object.keys(runs).forEach(id => {
runs[id]()

View File

@@ -48,8 +48,8 @@ const valueEncoding = {
}
export default class {
constructor(xo, { config }) {
const dir = `${config.datadir}/leveldb`
constructor(app) {
const dir = `${app.config.get('datadir')}/leveldb`
this._db = ensureDir(dir).then(() => levelup(dir))
}

View File

@@ -19,10 +19,10 @@ const removeFromArraySet = (set, value) => set && filter(set, current => current
// ===================================================================
export default class {
constructor(xo) {
this._xo = xo
constructor(app) {
this._app = app
const redis = xo._redis
const redis = app._redis
const groupsDb = (this._groups = new Groups({
connection: redis,
@@ -34,15 +34,15 @@ export default class {
indexes: ['email'],
}))
xo.on('clean', () => Promise.all([groupsDb.rebuildIndexes(), usersDb.rebuildIndexes()]))
xo.on('start', async () => {
xo.addConfigManager(
app.hooks.on('clean', () => Promise.all([groupsDb.rebuildIndexes(), usersDb.rebuildIndexes()]))
app.hooks.on('start', async () => {
app.addConfigManager(
'groups',
() => groupsDb.get(),
groups => Promise.all(groups.map(group => groupsDb.save(group))),
['users']
)
xo.addConfigManager(
app.addConfigManager(
'users',
() => usersDb.get(),
users =>
@@ -94,19 +94,19 @@ export default class {
await this._users.remove(id)
// Remove tokens of user.
this._xo
this._app
.getAuthenticationTokensForUser(id)
.then(tokens => {
forEach(tokens, token => {
this._xo.deleteAuthenticationToken(id)::ignoreErrors()
this._app.deleteAuthenticationToken(id)::ignoreErrors()
})
})
::ignoreErrors()
// Remove ACLs for this user.
this._xo.getAclsForSubject(id).then(acls => {
this._app.getAclsForSubject(id).then(acls => {
forEach(acls, acl => {
this._xo.removeAcl(id, acl.object, acl.action)::ignoreErrors()
this._app.removeAcl(id, acl.object, acl.action)::ignoreErrors()
})
})
@@ -222,7 +222,7 @@ export default class {
return user
}
if (!this._xo._config.createUserOnFirstSignin) {
if (!this._app.config.get('createUserOnFirstSignin')) {
throw new Error(`registering ${name} user is forbidden`)
}
@@ -251,7 +251,7 @@ export default class {
conflictingUser = users.find(user => user.email === name)
if (conflictingUser !== undefined) {
if (!this._xo._config.authentication.mergeProvidersUsers) {
if (!this._app.config.get('authentication.mergeProvidersUsers')) {
throw new Error(`User with username ${name} already exists`)
}
if (user !== undefined) {
@@ -269,7 +269,7 @@ export default class {
}
if (user === undefined) {
if (!this._xo._config.createUserOnFirstSignin) {
if (!this._app.config.get('createUserOnFirstSignin')) {
throw new Error(`registering ${name} user is forbidden`)
}
user = await this.createUser({
@@ -334,9 +334,9 @@ export default class {
await this._groups.remove(id)
// Remove ACLs for this group.
this._xo.getAclsForSubject(id).then(acls => {
this._app.getAclsForSubject(id).then(acls => {
forEach(acls, acl => {
this._xo.removeAcl(id, acl.object, acl.action)::ignoreErrors()
this._app.removeAcl(id, acl.object, acl.action)::ignoreErrors()
})
})

View File

@@ -3,7 +3,6 @@ import { createLogger } from '@xen-orchestra/log'
import { fibonacci } from 'iterable-backoff'
import { findKey } from 'lodash'
import { noSuchObject } from 'xo-common/api-errors'
import { parseDuration } from '@vates/parse-duration'
import { pDelay, ignoreErrors } from 'promise-toolbox'
import * as XenStore from '../_XenStore'
@@ -36,25 +35,24 @@ const log = createLogger('xo:xo-mixins:xen-servers')
// - _xapis[server.id] id defined
// - _serverIdsByPool[xapi.pool.$id] is server.id
export default class {
constructor(xo, { config: { guessVhdSizeOnImport, xapiMarkDisconnectedDelay, xapiOptions }, safeMode }) {
constructor(app, { safeMode }) {
this._objectConflicts = { __proto__: null } // TODO: clean when a server is disconnected.
const serversDb = (this._servers = new Servers({
connection: xo._redis,
connection: app._redis,
prefix: 'xo:server',
indexes: ['host'],
}))
this._serverIdsByPool = { __proto__: null }
this._stats = new XapiStats()
this._xapiOptions = {
guessVhdSizeOnImport,
...xapiOptions,
}
this._xapis = { __proto__: null }
this._xo = xo
this._xapiMarkDisconnectedDelay = parseDuration(xapiMarkDisconnectedDelay)
this._app = app
xo.on('clean', () => serversDb.rebuildIndexes())
xo.on('start', async () => {
app.config.watchDuration('xapiMarkDisconnectedDelay', xapiMarkDisconnectedDelay => {
this._xapiMarkDisconnectedDelay = xapiMarkDisconnectedDelay
})
app.hooks.on('clean', () => serversDb.rebuildIndexes())
app.hooks.on('start', async () => {
const connectServers = async () => {
// Connects to existing servers.
for (const server of await serversDb.get()) {
@@ -69,7 +67,7 @@ export default class {
}
}
xo.addConfigManager(
app.addConfigManager(
'xenServers',
() => serversDb.get(),
servers => serversDb.update(servers).then(connectServers)
@@ -174,7 +172,7 @@ export default class {
getXenServerIdByObject(object, type) {
if (typeof object === 'string') {
object = this._xo.getObject(object, type)
object = this._app.getObject(object, type)
}
const { $pool: poolId } = object
if (!poolId) {
@@ -190,7 +188,7 @@ export default class {
_onXenAdd(newXapiObjects, xapiIdsToXo, toRetry, conId, dependents, xapiObjects) {
const conflicts = this._objectConflicts
const objects = this._xo._objects
const objects = this._app._objects
const serverIdsByPool = this._serverIdsByPool
forEach(newXapiObjects, function handleObject(xapiObject, xapiId) {
@@ -235,7 +233,7 @@ export default class {
_onXenRemove(xapiObjects, xapiIdsToXo, toRetry, conId) {
const conflicts = this._objectConflicts
const objects = this._xo._objects
const objects = this._app._objects
forEach(xapiObjects, (_, xapiId) => {
toRetry && delete toRetry[xapiId]
@@ -272,11 +270,14 @@ export default class {
throw new Error('the server is already connected')
}
const { config } = this._app
const xapi = (this._xapis[server.id] = new Xapi({
allowUnauthorized: server.allowUnauthorized,
readOnly: server.readOnly,
...this._xapiOptions,
...config.get('xapiOptions'),
guessVhdSizeOnImport: config.get('guessVhdSizeOnImport'),
auth: {
user: server.username,
@@ -347,7 +348,7 @@ export default class {
}
return {
httpRequest: this._xo.httpRequest.bind(this),
httpRequest: this._app.httpRequest.bind(this),
install() {
objects.on('add', onAddOrUpdate)
@@ -415,9 +416,9 @@ export default class {
xapi.xo.uninstall()
delete this._xapis[server.id]
delete this._serverIdsByPool[poolId]
this._xo.emit('server:disconnected', { server, xapi })
this._app.emit('server:disconnected', { server, xapi })
})
this._xo.emit('server:connected', { server, xapi })
this._app.emit('server:connected', { server, xapi })
} catch (error) {
delete this._xapis[server.id]
xapi.disconnect()::ignoreErrors()
@@ -449,7 +450,7 @@ export default class {
// returns the XAPI object corresponding to an XO object/ID
getXapiObject(xoObjectOrId, xoType) {
const xoObject = typeof xoObjectOrId === 'string' ? this._xo.getObject(xoObjectOrId, xoType) : xoObjectOrId
const xoObject = typeof xoObjectOrId === 'string' ? this._app.getObject(xoObjectOrId, xoType) : xoObjectOrId
return this.getXapi(xoObject).getObjectByRef(xoObject._xapiRef)
}

View File

@@ -1,6 +1,9 @@
import Config from '@xen-orchestra/mixins/Config'
import Hooks from '@xen-orchestra/mixins/Hooks'
import mixin from '@xen-orchestra/mixin'
import mixinLegacy from '@xen-orchestra/mixin/legacy'
import XoCollection from 'xo-collection'
import XoUniqueIndex from 'xo-collection/unique-index'
import mixin from '@xen-orchestra/mixin'
import { createClient as createRedisClient } from 'redis'
import { createDebounceResource } from '@vates/disposable/debounceResource'
import { createLogger } from '@xen-orchestra/log'
@@ -17,15 +20,17 @@ import { generateToken, noop } from './utils'
const log = createLogger('xo:xo')
@mixin(Object.values(mixins))
@mixinLegacy(Object.values(mixins))
export default class Xo extends EventEmitter {
constructor({ config }) {
constructor(opts) {
super()
// a lot of mixins adds listener for start/stop/… events
this.setMaxListeners(0)
mixin(this, { Config, Hooks }, [opts])
this._config = config
// a lot of mixins adds listener for start/stop/… events
this.hooks.setMaxListeners(0)
const { config } = opts
this._objects = new XoCollection()
this._objects.createIndex('byRef', new XoUniqueIndex('_xapiRef'))
@@ -47,11 +52,11 @@ export default class Xo extends EventEmitter {
})
}
this.on('start', () => this._watchObjects())
this.hooks.on('start', () => this._watchObjects())
const debounceResource = createDebounceResource()
debounceResource.defaultDelay = parseDuration(config.resourceCacheDelay)
this.once('stop', debounceResource.flushAll)
this.hooks.on('stop', debounceResource.flushAll)
this.debounceResource = debounceResource
}

View File

@@ -28,7 +28,7 @@
"core-js": "^3.0.0",
"lodash": "^4.17.15",
"pako": "^1.0.11",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"vhd-lib": "^1.0.0",
"xml2js": "^0.4.23"
},

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-web",
"version": "5.80.0",
"version": "5.80.1",
"license": "AGPL-3.0-or-later",
"description": "Web interface client for Xen-Orchestra",
"keywords": [
@@ -97,7 +97,7 @@
"moment-timezone": "^0.5.14",
"notifyjs": "^3.0.0",
"otplib": "^11.0.0",
"promise-toolbox": "^0.18.0",
"promise-toolbox": "^0.19.0",
"prop-types": "^15.6.0",
"qrcode": "^1.3.2",
"random-password": "^0.1.2",

View File

@@ -139,7 +139,7 @@ const messages = {
homeHostPage: 'Hosts',
homePoolPage: 'Pools',
homeTemplatePage: 'Templates',
homeSrPage: 'Storages',
homeSrPage: 'Storage',
dashboardPage: 'Dashboard',
overviewDashboardPage: 'Overview',
overviewVisualizationDashboardPage: 'Visualizations',
@@ -268,8 +268,8 @@ const messages = {
homeSortVmsBySnapshots: 'Snapshots',
homeSortByContainer: 'Container',
homeSortByPool: 'Pool',
homeDisplayedItems: '{displayed, number}x {icon} (on {total, number})',
homeSelectedItems: '{selected, number}x {icon} selected (on {total, number})',
homeDisplayedItems: '{displayed, number}x {icon} (of {total, number})',
homeSelectedItems: '{selected, number}x {icon} selected (of {total, number})',
homeMore: 'More',
homeMigrateTo: 'Migrate to…',
homeMissingPatches: 'Missing patches',
@@ -789,7 +789,7 @@ const messages = {
poolRamUsage: '{used} used of {total} ({free} free)',
poolMaster: 'Master:',
displayAllHosts: 'Display all hosts of this pool',
displayAllStorages: 'Display all storages of this pool',
displayAllStorages: 'Display all storage for this pool',
displayAllVMs: 'Display all VMs of this pool',
licenseRestrictions: 'License restrictions',
licenseRestrictionsModalTitle: 'Warning: You are using a Free XenServer license',

View File

@@ -206,9 +206,9 @@ const Proxies = decorate([
async deployProxy({ fetchProxyUpgrades }, proxy) {
return fetchProxyUpgrades([await deployProxy(proxy)])
},
async upgradeAppliance({ fetchProxyUpgrades }, id) {
async upgradeAppliance({ fetchProxyUpgrades }, id, options) {
try {
await upgradeProxyAppliance(id)
await upgradeProxyAppliance(id, options)
} catch (error) {
if (!incorrectState.is(error)) {
throw error

View File

@@ -59,6 +59,15 @@
semver "^6.3.0"
source-map "^0.5.0"
"@babel/eslint-parser@^7.13.8":
version "7.13.14"
resolved "https://registry.yarnpkg.com/@babel/eslint-parser/-/eslint-parser-7.13.14.tgz#f80fd23bdd839537221914cb5d17720a5ea6ba3a"
integrity sha512-I0HweR36D73Ibn/FfrRDMKlMqJHFwidIUgYdMpH+aXYuQC+waq59YaJ6t9e9N36axJ82v1jR041wwqDrDXEwRA==
dependencies:
eslint-scope "^5.1.0"
eslint-visitor-keys "^1.3.0"
semver "^6.3.0"
"@babel/generator@^7.13.9", "@babel/generator@^7.4.0":
version "7.13.9"
resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.13.9.tgz#3a7aa96f9efb8e2be42d38d80e2ceb4c64d8de39"
@@ -275,7 +284,7 @@
chalk "^2.0.0"
js-tokens "^4.0.0"
"@babel/parser@^7.1.0", "@babel/parser@^7.12.13", "@babel/parser@^7.13.13", "@babel/parser@^7.4.3", "@babel/parser@^7.6.0", "@babel/parser@^7.7.0", "@babel/parser@^7.9.6":
"@babel/parser@^7.1.0", "@babel/parser@^7.12.13", "@babel/parser@^7.13.13", "@babel/parser@^7.4.3", "@babel/parser@^7.6.0", "@babel/parser@^7.9.6":
version "7.13.13"
resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.13.13.tgz#42f03862f4aed50461e543270916b47dd501f0df"
integrity sha512-OhsyMrqygfk5v8HmWwOzlYjJrtLaFhF34MrfG/Z73DgYCI6ojNUTUp2TYbtnjo8PegeJp12eamsNettCQjKjVw==
@@ -983,7 +992,7 @@
"@babel/parser" "^7.12.13"
"@babel/types" "^7.12.13"
"@babel/traverse@^7.0.0", "@babel/traverse@^7.1.0", "@babel/traverse@^7.13.0", "@babel/traverse@^7.13.13", "@babel/traverse@^7.4.3", "@babel/traverse@^7.7.0":
"@babel/traverse@^7.0.0", "@babel/traverse@^7.1.0", "@babel/traverse@^7.13.0", "@babel/traverse@^7.13.13", "@babel/traverse@^7.4.3":
version "7.13.13"
resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.13.13.tgz#39aa9c21aab69f74d948a486dd28a2dbdbf5114d"
integrity sha512-CblEcwmXKR6eP43oQGG++0QMTtCjAsa3frUuzHoiIJWpaIIi8dwMyEFUJoXRLxagGqCK+jALRwIO+o3R9p/uUg==
@@ -997,7 +1006,7 @@
debug "^4.1.0"
globals "^11.1.0"
"@babel/types@^7.0.0", "@babel/types@^7.0.0-beta.49", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.13.0", "@babel/types@^7.13.12", "@babel/types@^7.13.13", "@babel/types@^7.13.14", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.6.1", "@babel/types@^7.7.0", "@babel/types@^7.9.6":
"@babel/types@^7.0.0", "@babel/types@^7.0.0-beta.49", "@babel/types@^7.12.1", "@babel/types@^7.12.13", "@babel/types@^7.13.0", "@babel/types@^7.13.12", "@babel/types@^7.13.13", "@babel/types@^7.13.14", "@babel/types@^7.3.0", "@babel/types@^7.3.3", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.6.1", "@babel/types@^7.9.6":
version "7.13.14"
resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.13.14.tgz#c35a4abb15c7cd45a2746d78ab328e362cbace0d"
integrity sha512-A2aa3QTkWoyqsZZFl56MLUsfmh7O0gN41IPvXAE/++8ojpbz12SszD7JEGYVdn4f9Kt4amIei07swF1h4AqmmQ==
@@ -3035,23 +3044,6 @@ babel-core@^6.26.0:
slash "^1.0.0"
source-map "^0.5.7"
babel-core@^7.0.0-0:
version "7.0.0-bridge.0"
resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-7.0.0-bridge.0.tgz#95a492ddd90f9b4e9a4a1da14eb335b87b634ece"
integrity sha512-poPX9mZH/5CSanm50Q+1toVci6pv5KSRv/5TWCwtzQS5XEwn40BcCrgIeMFWP9CKKIniKXNxoIOnOq4VVlGXhg==
babel-eslint@^10.0.1:
version "10.1.0"
resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.1.0.tgz#6968e568a910b78fb3779cdd8b6ac2f479943232"
integrity sha512-ifWaTHQ0ce+448CYop8AdrQiBsGrnC+bMgfyKFdi6EsPLTAWG+QfyDeM6OH+FmWnKvEq5NnBMLvlBUPKQZoDSg==
dependencies:
"@babel/code-frame" "^7.0.0"
"@babel/parser" "^7.7.0"
"@babel/traverse" "^7.7.0"
"@babel/types" "^7.7.0"
eslint-visitor-keys "^1.0.0"
resolve "^1.12.0"
babel-generator@^6.26.0:
version "6.26.1"
resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.26.1.tgz#1844408d3b8f0d35a404ea7ac180f087a601bd90"
@@ -7383,7 +7375,7 @@ eslint-scope@^4.0.3:
esrecurse "^4.1.0"
estraverse "^4.1.1"
eslint-scope@^5.1.1:
eslint-scope@^5.1.0, eslint-scope@^5.1.1:
version "5.1.1"
resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c"
integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==
@@ -7398,7 +7390,7 @@ eslint-utils@^2.0.0, eslint-utils@^2.1.0:
dependencies:
eslint-visitor-keys "^1.1.0"
eslint-visitor-keys@^1.0.0, eslint-visitor-keys@^1.1.0, eslint-visitor-keys@^1.3.0:
eslint-visitor-keys@^1.1.0, eslint-visitor-keys@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e"
integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==
@@ -14963,13 +14955,20 @@ promise-toolbox@^0.16.0:
dependencies:
make-error "^1.3.2"
promise-toolbox@^0.18.0, promise-toolbox@^0.18.1:
promise-toolbox@^0.18.1:
version "0.18.1"
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.18.1.tgz#8070b959ba7d56359e5baaed4c4232af43b6f164"
integrity sha512-oN3ZwiTqdygp78fpTyqcASKtbfuTx6ijWX+Vr7LXoW7l2ACxyyzHPYtO3R8wUOW84FxbzxBQ+XtCJjyEdCIY0Q==
dependencies:
make-error "^1.3.2"
promise-toolbox@^0.19.0:
version "0.19.0"
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.19.0.tgz#a9b85e45c4d97d3f58d08ef66fc1eb7d0f482c40"
integrity sha512-lqdgHxDTvjhnfVAaCdC1hcx3Esdby17Xltw95dxeIENUxFXpV0Y92xmhvGVqZcPf8O9MyqL/ENZ0slW/0whLCA==
dependencies:
make-error "^1.3.2"
promise-toolbox@^0.8.0:
version "0.8.3"
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.8.3.tgz#b757232a21d246d8702df50da6784932dd0f5348"
@@ -16398,7 +16397,7 @@ resolve@1.1.7:
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b"
integrity sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs=
resolve@^1.1.4, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.10.0, resolve@^1.10.1, resolve@^1.12.0, resolve@^1.13.1, resolve@^1.14.2, resolve@^1.15.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.2.0, resolve@^1.3.2, resolve@^1.4.0:
resolve@^1.1.4, resolve@^1.1.6, resolve@^1.1.7, resolve@^1.10.0, resolve@^1.10.1, resolve@^1.13.1, resolve@^1.14.2, resolve@^1.15.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.2.0, resolve@^1.3.2, resolve@^1.4.0:
version "1.20.0"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975"
integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==