feat(proxy): version 1 (#4495)
Co-authored-by: badrAZ <azizbibadr@gmail.com> Co-authored-by: Mathieu <70369997+MathieuRA@users.noreply.github.com>
This commit is contained in:
2
.gitignore
vendored
2
.gitignore
vendored
@@ -9,6 +9,8 @@
|
||||
/packages/*/dist/
|
||||
/packages/*/node_modules/
|
||||
|
||||
/@xen-orchestra/proxy/src/app/mixins/index.js
|
||||
|
||||
/packages/vhd-cli/src/commands/index.js
|
||||
|
||||
/packages/xen-api/examples/node_modules/
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
"preferGlobal": true,
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/backups": "^0.1.1",
|
||||
"@xen-orchestra/backups": "^0.4.0",
|
||||
"@xen-orchestra/fs": "^0.12.1",
|
||||
"filenamify": "^4.1.0",
|
||||
"getopts": "^2.2.5",
|
||||
|
||||
232
@xen-orchestra/backups/Backup.js
Normal file
232
@xen-orchestra/backups/Backup.js
Normal file
@@ -0,0 +1,232 @@
|
||||
const asyncMapSettled = require('@xen-orchestra/async-map').default
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const limitConcurrency = require('limit-concurrency-decorator').default
|
||||
const using = require('promise-toolbox/using')
|
||||
const { compileTemplate } = require('@xen-orchestra/template')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern')
|
||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup')
|
||||
const { Task } = require('./task')
|
||||
const { VmBackup } = require('./_VmBackup')
|
||||
const { XoMetadataBackup } = require('./_XoMetadataBackup')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const getAdaptersByRemote = adapters => {
|
||||
const adaptersByRemote = {}
|
||||
adapters.forEach(({ adapter, remoteId }) => {
|
||||
adaptersByRemote[remoteId] = adapter
|
||||
})
|
||||
return adaptersByRemote
|
||||
}
|
||||
|
||||
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
|
||||
|
||||
exports.Backup = class Backup {
|
||||
constructor({
|
||||
config,
|
||||
getAdapter,
|
||||
getConnectedXapi,
|
||||
job,
|
||||
|
||||
recordToXapi,
|
||||
remotes,
|
||||
schedule,
|
||||
}) {
|
||||
this._config = config
|
||||
this._getConnectedXapi = getConnectedXapi
|
||||
this._job = job
|
||||
this._recordToXapi = recordToXapi
|
||||
this._remotes = remotes
|
||||
this._schedule = schedule
|
||||
|
||||
this._getAdapter = Disposable.factory(function* (remoteId) {
|
||||
return {
|
||||
adapter: yield getAdapter(remotes[remoteId]),
|
||||
remoteId,
|
||||
}
|
||||
})
|
||||
|
||||
this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
|
||||
'{job.name}': job.name,
|
||||
'{vm.name_label}': vm => vm.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
run() {
|
||||
const type = this._job.type
|
||||
if (type === 'backup') {
|
||||
return this._runVmBackup()
|
||||
} else if (type === 'metadataBackup') {
|
||||
return this._runMetadataBackup()
|
||||
} else {
|
||||
throw new Error(`No runner for the backup type ${type}`)
|
||||
}
|
||||
}
|
||||
|
||||
async _runMetadataBackup() {
|
||||
const config = this._config
|
||||
const job = this._job
|
||||
const schedule = this._schedule
|
||||
|
||||
const settings = {
|
||||
...config.defaultSettings,
|
||||
...config.metadata.defaultSettings,
|
||||
...job.settings[''],
|
||||
...job.settings[schedule.id],
|
||||
}
|
||||
|
||||
await using(
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.pools).map(id =>
|
||||
this._getRecord('pool', id).catch(error => {
|
||||
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
|
||||
runTask(
|
||||
{
|
||||
name: 'get pool record',
|
||||
data: { type: 'pool', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.remotes).map(id =>
|
||||
this._getAdapter(id).catch(error => {
|
||||
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
|
||||
runTask(
|
||||
{
|
||||
name: 'get remote adapter',
|
||||
data: { type: 'remote', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
async (pools, remoteAdapters) => {
|
||||
// remove adapters that failed (already handled)
|
||||
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
|
||||
if (remoteAdapters.length === 0) {
|
||||
return
|
||||
}
|
||||
remoteAdapters = getAdaptersByRemote(remoteAdapters)
|
||||
|
||||
// remove pools that failed (already handled)
|
||||
pools = pools.filter(_ => _ !== undefined)
|
||||
|
||||
const promises = []
|
||||
if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
|
||||
promises.push(
|
||||
asyncMap(pools, async pool =>
|
||||
runTask(
|
||||
{
|
||||
name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
|
||||
data: {
|
||||
id: pool.$id,
|
||||
pool,
|
||||
poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
|
||||
type: 'pool',
|
||||
},
|
||||
},
|
||||
() =>
|
||||
new PoolMetadataBackup({
|
||||
config,
|
||||
job,
|
||||
pool,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
|
||||
promises.push(
|
||||
runTask(
|
||||
{
|
||||
name: `Starting XO metadata backup. (${job.id})`,
|
||||
data: {
|
||||
type: 'xo',
|
||||
},
|
||||
},
|
||||
() =>
|
||||
new XoMetadataBackup({
|
||||
config,
|
||||
job,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
}
|
||||
await Promise.all(promises)
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
async _runVmBackup() {
|
||||
const job = this._job
|
||||
|
||||
// FIXME: proper SimpleIdPattern handling
|
||||
const getSnapshotNameLabel = this._getSnapshotNameLabel
|
||||
const schedule = this._schedule
|
||||
|
||||
const config = this._config
|
||||
const { settings } = job
|
||||
const scheduleSettings = {
|
||||
...config.defaultSettings,
|
||||
...config.vm.defaultSettings,
|
||||
...settings[''],
|
||||
...settings[schedule.id],
|
||||
}
|
||||
|
||||
await using(
|
||||
Disposable.all(extractIdsFromSimplePattern(job.srs).map(_ => this._getRecord('SR', _))),
|
||||
Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
|
||||
async (srs, remoteAdapters) => {
|
||||
const vmIds = extractIdsFromSimplePattern(job.vms)
|
||||
|
||||
Task.info('vms', { vms: vmIds })
|
||||
|
||||
remoteAdapters = getAdaptersByRemote(remoteAdapters)
|
||||
|
||||
const handleVm = vmUuid =>
|
||||
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
|
||||
using(this._getRecord('VM', vmUuid), vm =>
|
||||
new VmBackup({
|
||||
config,
|
||||
getSnapshotNameLabel,
|
||||
job,
|
||||
// remotes,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings: { ...scheduleSettings, ...settings[vmUuid] },
|
||||
srs,
|
||||
vm,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
const { concurrency } = scheduleSettings
|
||||
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
_getRecord = Disposable.factory(this._getRecord)
|
||||
async *_getRecord(type, uuid) {
|
||||
const xapiId = this._recordToXapi[uuid]
|
||||
if (xapiId === undefined) {
|
||||
throw new Error('no XAPI associated to ' + uuid)
|
||||
}
|
||||
|
||||
const xapi = yield this._getConnectedXapi(xapiId)
|
||||
return xapi.getRecordByUuid(type, uuid)
|
||||
}
|
||||
}
|
||||
40
@xen-orchestra/backups/DurablePartition.js
Normal file
40
@xen-orchestra/backups/DurablePartition.js
Normal file
@@ -0,0 +1,40 @@
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
|
||||
exports.DurablePartition = class DurablePartition {
|
||||
// private resource API is used exceptionally to be able to separate resource creation and release
|
||||
#partitionDisposers = {}
|
||||
|
||||
flushAll() {
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
return asyncMap(Object.keys(partitionDisposers), path => {
|
||||
const disposers = partitionDisposers[path]
|
||||
delete partitionDisposers[path]
|
||||
return asyncMap(disposers, d => d(path).catch(noop => {}))
|
||||
})
|
||||
}
|
||||
|
||||
async mount(adapter, diskId, partitionId) {
|
||||
const { value: path, dispose } = await adapter.getPartition(diskId, partitionId)
|
||||
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
if (partitionDisposers[path] === undefined) {
|
||||
partitionDisposers[path] = []
|
||||
}
|
||||
partitionDisposers[path].push(dispose)
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
async unmount(path) {
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
const disposers = partitionDisposers[path]
|
||||
if (disposers === undefined) {
|
||||
throw new Error(`No partition corresponding to the path ${path} found`)
|
||||
}
|
||||
|
||||
await disposers.pop()()
|
||||
if (disposers.length === 0) {
|
||||
delete partitionDisposers[path]
|
||||
}
|
||||
}
|
||||
}
|
||||
59
@xen-orchestra/backups/ImportVmBackup.js
Normal file
59
@xen-orchestra/backups/ImportVmBackup.js
Normal file
@@ -0,0 +1,59 @@
|
||||
const assert = require('assert')
|
||||
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { importDeltaVm } = require('./_deltaVm')
|
||||
const { Task } = require('./task')
|
||||
|
||||
exports.ImportVmBackup = class ImportVmBackup {
|
||||
constructor({ adapter, metadata, srUuid, xapi }) {
|
||||
this._adapter = adapter
|
||||
this._metadata = metadata
|
||||
this._srUuid = srUuid
|
||||
this._xapi = xapi
|
||||
}
|
||||
|
||||
async run() {
|
||||
const adapter = this._adapter
|
||||
const metadata = this._metadata
|
||||
const isFull = metadata.mode === 'full'
|
||||
|
||||
let backup
|
||||
if (isFull) {
|
||||
backup = await adapter.readFullVmBackup(metadata)
|
||||
} else {
|
||||
assert.strictEqual(metadata.mode, 'delta')
|
||||
|
||||
backup = await adapter.readDeltaVmBackup(metadata)
|
||||
}
|
||||
|
||||
return Task.run(
|
||||
{
|
||||
name: 'transfer',
|
||||
},
|
||||
async () => {
|
||||
const xapi = this._xapi
|
||||
const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)
|
||||
|
||||
const vmRef = isFull
|
||||
? await xapi.VM_import(backup, srRef)
|
||||
: await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
|
||||
detectBase: false,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
xapi.call('VM.add_tags', vmRef, 'restored from backup'),
|
||||
xapi.call(
|
||||
'VM.set_name_label',
|
||||
vmRef,
|
||||
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
|
||||
),
|
||||
])
|
||||
|
||||
return {
|
||||
size: metadata.size,
|
||||
id: await xapi.getField('VM', vmRef, 'uuid'),
|
||||
}
|
||||
}
|
||||
).catch(() => {}) // errors are handled by logs
|
||||
}
|
||||
}
|
||||
555
@xen-orchestra/backups/RemoteAdapter.js
Normal file
555
@xen-orchestra/backups/RemoteAdapter.js
Normal file
@@ -0,0 +1,555 @@
|
||||
const asyncMapSettled = require('@xen-orchestra/async-map').default
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const fromEvent = require('promise-toolbox/fromEvent')
|
||||
const pDefer = require('promise-toolbox/defer')
|
||||
const pump = require('pump')
|
||||
const using = require('promise-toolbox/using')
|
||||
const { basename, dirname, join, normalize, resolve } = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped')
|
||||
const { execFile } = require('child_process')
|
||||
const { readdir, stat } = require('fs-extra')
|
||||
const { ZipFile } = require('yazl')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir')
|
||||
const { getTmpDir } = require('./_getTmpDir')
|
||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions')
|
||||
const { lvs, pvs } = require('./_lvm')
|
||||
|
||||
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
||||
|
||||
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
|
||||
|
||||
const { warn } = createLogger('xo:proxy:backups:RemoteAdapter')
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
|
||||
const isMetadataFile = filename => filename.endsWith('.json')
|
||||
const isVhdFile = filename => filename.endsWith('.vhd')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
||||
|
||||
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
|
||||
|
||||
const RE_VHDI = /^vhdi(\d+)$/
|
||||
|
||||
async function addDirectory(files, realPath, metadataPath) {
|
||||
try {
|
||||
const subFiles = await readdir(realPath)
|
||||
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOTDIR') {
|
||||
throw error
|
||||
}
|
||||
files.push({
|
||||
realPath,
|
||||
metadataPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const createSafeReaddir = (handler, methodName) => (path, options) =>
|
||||
handler.list(path, options).catch(error => {
|
||||
if (error?.code !== 'ENOENT') {
|
||||
warn(`${methodName} ${path}`, { error })
|
||||
}
|
||||
return []
|
||||
})
|
||||
|
||||
const debounceResourceFactory = factory =>
|
||||
function () {
|
||||
return this._debounceResource(factory.apply(this, arguments))
|
||||
}
|
||||
|
||||
exports.RemoteAdapter = class RemoteAdapter {
|
||||
constructor(handler, { debounceResource, dirMode }) {
|
||||
this._debounceResource = debounceResource
|
||||
this._dirMode = dirMode
|
||||
this._handler = handler
|
||||
}
|
||||
|
||||
get handler() {
|
||||
return this._handler
|
||||
}
|
||||
|
||||
async _deleteVhd(path) {
|
||||
const handler = this._handler
|
||||
const vhds = await asyncMapSettled(
|
||||
await handler.list(dirname(path), {
|
||||
filter: isVhdFile,
|
||||
prependDir: true,
|
||||
}),
|
||||
async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
return {
|
||||
footer: vhd.footer,
|
||||
header: vhd.header,
|
||||
path,
|
||||
}
|
||||
} catch (error) {
|
||||
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
|
||||
// they are probably inconsequent to the backup process and should not
|
||||
// fail it.
|
||||
warn(`BackupNg#_deleteVhd ${path}`, { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
const base = basename(path)
|
||||
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
|
||||
if (child === undefined) {
|
||||
await handler.unlink(path)
|
||||
return 0
|
||||
}
|
||||
|
||||
try {
|
||||
const childPath = child.path
|
||||
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
|
||||
await handler.rename(path, childPath)
|
||||
return mergedDataSize
|
||||
} catch (error) {
|
||||
handler.unlink(path).catch(warn)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _findPartition(devicePath, partitionId) {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
const partition = partitions.find(_ => _.id === partitionId)
|
||||
if (partition === undefined) {
|
||||
throw new Error(`partition ${partitionId} not found`)
|
||||
}
|
||||
return partition
|
||||
}
|
||||
|
||||
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
|
||||
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
|
||||
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
|
||||
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
|
||||
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
|
||||
|
||||
await fromCallback(execFile, 'vgchange', ['-ay', vgName])
|
||||
try {
|
||||
yield lvs(['lv_name', 'lv_path'], vgName)
|
||||
} finally {
|
||||
await fromCallback(execFile, 'vgchange', ['-an', vgName])
|
||||
}
|
||||
}
|
||||
|
||||
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
|
||||
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
|
||||
async *_getLvmPhysicalVolume(devicePath, partition) {
|
||||
const args = []
|
||||
if (partition !== undefined) {
|
||||
args.push('-o', partition.start * 512, '--sizelimit', partition.size)
|
||||
}
|
||||
args.push('--show', '-f', devicePath)
|
||||
const path = (await fromCallback(execFile, 'losetup', args)).trim()
|
||||
try {
|
||||
await fromCallback(execFile, 'pvscan', ['--cache', path])
|
||||
yield path
|
||||
} finally {
|
||||
try {
|
||||
const vgNames = await pvs('vg_name', path)
|
||||
await fromCallback(execFile, 'vgchange', ['-an', ...vgNames])
|
||||
} finally {
|
||||
await fromCallback(execFile, 'losetup', ['-d', path])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_getPartition = Disposable.factory(this._getPartition)
|
||||
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getPartition = debounceResourceFactory(this._getPartition)
|
||||
async *_getPartition(devicePath, partition) {
|
||||
const options = ['loop', 'ro']
|
||||
|
||||
if (partition !== undefined) {
|
||||
const { size, start } = partition
|
||||
options.push(`sizelimit=${size}`)
|
||||
if (start !== undefined) {
|
||||
options.push(`offset=${start * 512}`)
|
||||
}
|
||||
}
|
||||
|
||||
const path = yield getTmpDir()
|
||||
const mount = options => {
|
||||
return fromCallback(execFile, 'mount', [
|
||||
`--options=${options.join(',')}`,
|
||||
`--source=${devicePath}`,
|
||||
`--target=${path}`,
|
||||
])
|
||||
}
|
||||
|
||||
// `norecovery` option is used for ext3/ext4/xfs, if it fails it might be
|
||||
// another fs, try without
|
||||
try {
|
||||
await mount([...options, 'norecovery'])
|
||||
} catch (error) {
|
||||
await mount(options)
|
||||
}
|
||||
try {
|
||||
yield path
|
||||
} finally {
|
||||
await fromCallback(execFile, 'umount', ['--lazy', path])
|
||||
}
|
||||
}
|
||||
|
||||
_listLvmLogicalVolumes(devicePath, partition, results = []) {
|
||||
return using(this._getLvmPhysicalVolume(devicePath, partition), async path => {
|
||||
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], path)
|
||||
const partitionId = partition !== undefined ? partition.id : ''
|
||||
lvs.forEach((lv, i) => {
|
||||
const name = lv.lv_name
|
||||
if (name !== '') {
|
||||
results.push({
|
||||
id: `${partitionId}/${lv.vg_name}/${name}`,
|
||||
name,
|
||||
size: lv.lv_size,
|
||||
})
|
||||
}
|
||||
})
|
||||
return results
|
||||
})
|
||||
}
|
||||
|
||||
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
|
||||
async *_usePartitionFiles(diskId, partitionId, paths) {
|
||||
const path = yield this.getPartition(diskId, partitionId)
|
||||
|
||||
const files = []
|
||||
await asyncMap(paths, file =>
|
||||
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
|
||||
)
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
fetchPartitionFiles(diskId, partitionId, paths) {
|
||||
const { promise, reject, resolve } = pDefer()
|
||||
using(
|
||||
async function* () {
|
||||
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
|
||||
const zip = new ZipFile()
|
||||
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
|
||||
zip.end()
|
||||
const { outputStream } = zip
|
||||
resolve(outputStream)
|
||||
await fromEvent(outputStream, 'end')
|
||||
}.bind(this)
|
||||
).catch(error => {
|
||||
warn(error)
|
||||
reject(error)
|
||||
})
|
||||
return promise
|
||||
}
|
||||
|
||||
async deleteDeltaVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
let mergedDataSize = 0
|
||||
await asyncMapSettled(backups, ({ _filename, vhds }) =>
|
||||
Promise.all([
|
||||
handler.unlink(_filename),
|
||||
asyncMap(Object.values(vhds), async _ => {
|
||||
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
|
||||
}),
|
||||
])
|
||||
)
|
||||
return mergedDataSize
|
||||
}
|
||||
|
||||
async deleteMetadataBackup(backupId) {
|
||||
const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
|
||||
const metadataDirReg = 'xo-(config|pool-metadata)-backups'
|
||||
const timestampReg = '\\d{8}T\\d{6}Z'
|
||||
const regexp = new RegExp(`^${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`)
|
||||
if (!regexp.test(backupId)) {
|
||||
throw new Error(`The id (${backupId}) not correspond to a metadata folder`)
|
||||
}
|
||||
|
||||
await this._handler.rmtree(backupId)
|
||||
}
|
||||
|
||||
async deleteOldMetadataBackups(dir, retention) {
|
||||
const handler = this.handler
|
||||
let list = await handler.list(dir)
|
||||
list.sort()
|
||||
list = list.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp)).slice(0, -retention)
|
||||
await asyncMapSettled(list, timestamp => handler.rmtree(`${dir}/${timestamp}`))
|
||||
}
|
||||
|
||||
async deleteFullVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
await asyncMapSettled(backups, ({ _filename, xva }) =>
|
||||
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
|
||||
)
|
||||
}
|
||||
|
||||
async deleteVmBackup(filename) {
|
||||
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
|
||||
metadata._filename = filename
|
||||
|
||||
if (metadata.mode === 'delta') {
|
||||
await this.deleteDeltaVmBackups([metadata])
|
||||
} else if (metadata.mode === 'full') {
|
||||
await this.deleteFullVmBackups([metadata])
|
||||
} else {
|
||||
throw new Error(`no deleter for backup mode ${metadata.mode}`)
|
||||
}
|
||||
}
|
||||
|
||||
getDisk = Disposable.factory(this.getDisk)
|
||||
getDisk = deduped(this.getDisk, diskId => [diskId])
|
||||
getDisk = debounceResourceFactory(this.getDisk)
|
||||
async *getDisk(diskId) {
|
||||
const handler = this._handler
|
||||
|
||||
const diskPath = handler._getFilePath('/' + diskId)
|
||||
const mountDir = yield getTmpDir()
|
||||
await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
|
||||
try {
|
||||
let max = 0
|
||||
let maxEntry
|
||||
const entries = await readdir(mountDir)
|
||||
entries.forEach(entry => {
|
||||
const matches = RE_VHDI.exec(entry)
|
||||
if (matches !== null) {
|
||||
const value = +matches[1]
|
||||
if (value > max) {
|
||||
max = value
|
||||
maxEntry = entry
|
||||
}
|
||||
}
|
||||
})
|
||||
if (max === 0) {
|
||||
throw new Error('no disks found')
|
||||
}
|
||||
|
||||
yield `${mountDir}/${maxEntry}`
|
||||
} finally {
|
||||
await fromCallback(execFile, 'fusermount', ['-uz', mountDir])
|
||||
}
|
||||
}
|
||||
|
||||
// partitionId values:
|
||||
//
|
||||
// - undefined: raw disk
|
||||
// - `<partitionId>`: partitioned disk
|
||||
// - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
|
||||
// - `/<vgName>/lvName>`: LVM on a raw disk
|
||||
getPartition = Disposable.factory(this.getPartition)
|
||||
async *getPartition(diskId, partitionId) {
|
||||
const devicePath = yield this.getDisk(diskId)
|
||||
if (partitionId === undefined) {
|
||||
return yield this._getPartition(devicePath)
|
||||
}
|
||||
|
||||
const isLvmPartition = partitionId.includes('/')
|
||||
if (isLvmPartition) {
|
||||
const [pvId, vgName, lvName] = partitionId.split('/')
|
||||
const lvs = yield this._getLvmLogicalVolumes(devicePath, pvId !== '' ? pvId : undefined, vgName)
|
||||
return yield this._getPartition(lvs.find(_ => _.lv_name === lvName).lv_path)
|
||||
}
|
||||
|
||||
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
|
||||
}
|
||||
|
||||
async listAllVmBackups() {
|
||||
const handler = this._handler
|
||||
|
||||
const backups = { __proto__: null }
|
||||
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
|
||||
const vmBackups = await this.listVmBackups(vmUuid)
|
||||
backups[vmUuid] = vmBackups
|
||||
})
|
||||
|
||||
return backups
|
||||
}
|
||||
|
||||
listPartitionFiles(diskId, partitionId, path) {
|
||||
return using(this.getPartition(diskId, partitionId), async rootPath => {
|
||||
path = resolveSubpath(rootPath, path)
|
||||
|
||||
const entriesMap = {}
|
||||
await asyncMap(await readdir(path), async name => {
|
||||
try {
|
||||
const stats = await stat(`${path}/${name}`)
|
||||
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return entriesMap
|
||||
})
|
||||
}
|
||||
|
||||
listPartitions(diskId) {
|
||||
return using(this.getDisk(diskId), async devicePath => {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
|
||||
if (partitions.length === 0) {
|
||||
try {
|
||||
// handle potential raw LVM physical volume
|
||||
return await this._listLvmLogicalVolumes(devicePath, undefined, partitions)
|
||||
} catch (error) {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
const results = []
|
||||
await asyncMapSettled(partitions, partition =>
|
||||
partition.type === LVM_PARTITION_TYPE
|
||||
? this._listLvmLogicalVolumes(devicePath, partition, results)
|
||||
: results.push(partition)
|
||||
)
|
||||
return results
|
||||
})
|
||||
}
|
||||
|
||||
async listPoolMetadataBackups() {
|
||||
const handler = this._handler
|
||||
const safeReaddir = createSafeReaddir(handler, 'listPoolMetadataBackups')
|
||||
|
||||
const backupsByPool = {}
|
||||
await asyncMap(await safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }), async scheduleDir =>
|
||||
asyncMap(await safeReaddir(scheduleDir), async poolId => {
|
||||
const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
|
||||
return asyncMap(await safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }), async backupDir => {
|
||||
try {
|
||||
backups.push({
|
||||
id: backupDir,
|
||||
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
|
||||
})
|
||||
} catch (error) {
|
||||
warn(`listPoolMetadataBackups ${backupDir}`, {
|
||||
error,
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
)
|
||||
|
||||
// delete empty entries and sort backups
|
||||
Object.keys(backupsByPool).forEach(poolId => {
|
||||
const backups = backupsByPool[poolId]
|
||||
if (backups.length === 0) {
|
||||
delete backupsByPool[poolId]
|
||||
} else {
|
||||
backups.sort(compareTimestamp)
|
||||
}
|
||||
})
|
||||
|
||||
return backupsByPool
|
||||
}
|
||||
|
||||
async listVmBackups(vmUuid, predicate) {
|
||||
const handler = this._handler
|
||||
const backups = []
|
||||
|
||||
try {
|
||||
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
|
||||
filter: isMetadataFile,
|
||||
prependDir: true,
|
||||
})
|
||||
await asyncMap(files, async file => {
|
||||
try {
|
||||
const metadata = await this.readVmBackupMetadata(file)
|
||||
if (predicate === undefined || predicate(metadata)) {
|
||||
// inject an id usable by importVmBackupNg()
|
||||
metadata.id = metadata._filename
|
||||
|
||||
backups.push(metadata)
|
||||
}
|
||||
} catch (error) {
|
||||
warn(`listVmBackups ${file}`, { error })
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
let code
|
||||
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
||||
async listXoMetadataBackups() {
|
||||
const handler = this._handler
|
||||
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
|
||||
|
||||
const backups = []
|
||||
await asyncMap(await safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }), async scheduleDir =>
|
||||
asyncMap(await safeReaddir(scheduleDir, { prependDir: true }), async backupDir => {
|
||||
try {
|
||||
backups.push({
|
||||
id: backupDir,
|
||||
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
|
||||
})
|
||||
} catch (error) {
|
||||
warn(`listXoMetadataBackups ${backupDir}`, { error })
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
||||
async outputStream(input, path, { checksum = true, validator = noop } = {}) {
|
||||
const handler = this._handler
|
||||
input = await input
|
||||
const tmpPath = `${dirname(path)}/.${basename(path)}`
|
||||
const output = await handler.createOutputStream(tmpPath, {
|
||||
checksum,
|
||||
dirMode: this._dirMode,
|
||||
})
|
||||
try {
|
||||
await Promise.all([fromCallback(pump, input, output), output.checksumWritten, input.task])
|
||||
await validator(tmpPath)
|
||||
await handler.rename(tmpPath, path, { checksum })
|
||||
} catch (error) {
|
||||
await handler.unlink(tmpPath, { checksum })
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async readDeltaVmBackup(metadata) {
|
||||
const handler = this._handler
|
||||
const { vbds, vdis, vhds, vifs, vm } = metadata
|
||||
const dir = dirname(metadata._filename)
|
||||
|
||||
const streams = {}
|
||||
await asyncMapSettled(vdis, async (vdi, id) => {
|
||||
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
|
||||
})
|
||||
|
||||
return {
|
||||
streams,
|
||||
vbds,
|
||||
vdis,
|
||||
version: '1.0.0',
|
||||
vifs,
|
||||
vm,
|
||||
}
|
||||
}
|
||||
|
||||
readFullVmBackup(metadata) {
|
||||
return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
|
||||
}
|
||||
|
||||
async readVmBackupMetadata(path) {
|
||||
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
|
||||
}
|
||||
}
|
||||
24
@xen-orchestra/backups/RestoreMetadataBackup.js
Normal file
24
@xen-orchestra/backups/RestoreMetadataBackup.js
Normal file
@@ -0,0 +1,24 @@
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
|
||||
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup')
|
||||
|
||||
exports.RestoreMetadataBackup = class RestoreMetadataBackup {
|
||||
constructor({ backupId, handler, xapi }) {
|
||||
this._backupId = backupId
|
||||
this._handler = handler
|
||||
this._xapi = xapi
|
||||
}
|
||||
|
||||
async run() {
|
||||
const backupId = this._backupId
|
||||
const handler = this._handler
|
||||
const xapi = this._xapi
|
||||
|
||||
if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
|
||||
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
|
||||
task: xapi.createTask('Import pool metadata'),
|
||||
})
|
||||
} else {
|
||||
return JSON.parse(String(await handler.readFile(`${backupId}/data.json`)))
|
||||
}
|
||||
}
|
||||
}
|
||||
117
@xen-orchestra/backups/_ContinuousReplicationWriter.js
Normal file
117
@xen-orchestra/backups/_ContinuousReplicationWriter.js
Normal file
@@ -0,0 +1,117 @@
|
||||
const asyncMapSettled = require('@xen-orchestra/async-map').default
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { importDeltaVm, TAG_COPY_SRC } = require('./_deltaVm')
|
||||
const { listReplicatedVms } = require('./_listReplicatedVms')
|
||||
const { Task } = require('./task')
|
||||
|
||||
exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
  // Writer that replicates a delta VM export onto a target SR.
  //
  // - backup: the VmBackup being run (provides job, schedule and VM info)
  // - sr: target XAPI SR record
  // - settings: effective settings for this (job, schedule, SR) combination
  constructor(backup, sr, settings) {
    this._backup = backup
    this._settings = settings
    this._sr = sr

    // wrap `run` in a task so its progress and result are reported in logs
    this.run = Task.wrapFn(
      {
        name: 'export',
        data: ({ deltaExport }) => ({
          id: sr.uuid,
          // the export is full if at least one VDI has no base delta
          isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
          type: 'SR',
        }),
      },
      this.run
    )
  }

  // Removes from `baseUuidToSrcVdi` the VDIs that cannot be used as delta
  // bases for this SR: only VDIs whose replicated copy exists on the target
  // are kept. If no replicated copy of `baseVm` exists at all, the map is
  // cleared entirely (a full export will then be required).
  async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    const sr = this._sr
    const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
      vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
    )
    if (replicatedVm === undefined) {
      return baseUuidToSrcVdi.clear()
    }

    const xapi = replicatedVm.$xapi
    // UUIDs of the source VDIs that have a replicated copy on the target
    const replicatedVdis = new Set(
      await asyncMap(await replicatedVm.$getDisks(), async vdiRef => {
        const otherConfig = await xapi.getField('VDI', vdiRef, 'other_config')
        return otherConfig[TAG_COPY_SRC]
      })
    )

    for (const uuid of baseUuidToSrcVdi.keys()) {
      if (!replicatedVdis.has(uuid)) {
        baseUuidToSrcVdi.delete(uuid)
      }
    }
  }

  // Imports the delta export on the target SR, tags/renames/neuters the
  // resulting VM and applies the copy retention policy.
  //
  // - timestamp: snapshot time, used in the replicated VM's name
  // - deltaExport: streams + records produced by `exportDeltaVm`
  // - sizeContainers: per-stream size accumulators for transfer reporting
  async run({ timestamp, deltaExport, sizeContainers }) {
    const sr = this._sr
    const settings = this._settings
    const { job, scheduleId, vm } = this._backup

    const { uuid: srUuid, $xapi: xapi } = sr

    // delete previous interrupted copies
    // NOTE(review): `checkBaseVdis` filters replicated VMs by job.id while
    // this call passes scheduleId — confirm `listReplicatedVms` matches on
    // either identifier
    ignoreErrors.call(
      asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => xapi.VM_destroy(vm.$ref))
    )

    // copies that exceed the retention once this new copy is added
    const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))

    const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
    // `deleteFirst` frees space before the transfer, at the cost of briefly
    // holding fewer copies than the configured retention
    const { deleteFirst } = settings
    if (deleteFirst) {
      await deleteOldBackups()
    }

    let targetVmRef
    await Task.run({ name: 'transfer' }, async () => {
      targetVmRef = await importDeltaVm(
        {
          // inherit everything (streams, vbds, vdis…) from the export, only
          // override the VM record to add the replication tag
          __proto__: deltaExport,
          vm: {
            ...deltaExport.vm,
            tags: [...deltaExport.vm.tags, 'Continuous Replication'],
          },
        },
        sr
      )
      return {
        size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
      }
    })

    const targetVm = await xapi.getRecord('VM', targetVmRef)

    await Promise.all([
      // the replicated copy must not be restarted by HA on the target pool
      targetVm.ha_restart_priority !== '' &&
        Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
      targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
      // prevent accidental start of the copy: users should clone it instead
      targetVm.update_blocked_operations(
        'start',
        'Start operation for this vm is blocked, clone it if you want to use it.'
      ),
      targetVm.update_other_config({
        'xo:backup:sr': srUuid,

        // these entries need to be added in case of offline backup
        'xo:backup:datetime': formatDateTime(timestamp),
        'xo:backup:job': job.id,
        'xo:backup:schedule': scheduleId,
        'xo:backup:vm': vm.uuid,
      }),
    ])

    if (!deleteFirst) {
      await deleteOldBackups()
    }
  }
}
|
||||
210
@xen-orchestra/backups/_DeltaBackupWriter.js
Normal file
210
@xen-orchestra/backups/_DeltaBackupWriter.js
Normal file
@@ -0,0 +1,210 @@
|
||||
const assert = require('assert')
|
||||
const map = require('lodash/map')
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { dirname } = require('path')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { checkVhd } = require('./_checkVhd')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { getVmBackupDir } = require('./_getVmBackupDir')
|
||||
const { packUuid } = require('./_packUuid')
|
||||
const { Task } = require('./task')
|
||||
|
||||
const { warn } = createLogger('xo:proxy:backups:DeltaBackupWriter')
|
||||
|
||||
exports.DeltaBackupWriter = class DeltaBackupWriter {
  // Writer that stores a delta VM export as chained VHD files on a remote.
  //
  // - backup: the VmBackup being run
  // - remoteId: id of the target remote (resolved to an adapter)
  // - settings: effective settings for this (job, schedule, remote)
  constructor(backup, remoteId, settings) {
    this._adapter = backup.remoteAdapters[remoteId]
    this._backup = backup
    this._settings = settings

    // wrap `run` in a task so its progress and result are reported in logs
    this.run = Task.wrapFn(
      {
        name: 'export',
        data: ({ deltaExport }) => ({
          id: remoteId,
          // the export is full if at least one VDI has no base delta
          isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
          type: 'remote',
        }),
      },
      this.run
    )
  }

  // Removes from `baseUuidToSrcVdi` the VDIs whose base VHD is not present
  // and healthy on this remote; a delta against such a base would be
  // unusable. Unreadable/broken VHDs encountered during the scan are deleted
  // (best effort).
  async checkBaseVdis(baseUuidToSrcVdi) {
    const { handler } = this._adapter
    const backup = this._backup

    const backupDir = getVmBackupDir(backup.vm.uuid)
    const vdisDir = `${backupDir}/vdis/${backup.job.id}`

    await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
      let found = false
      try {
        // list the VHDs of the chain for this VDI (hidden files excluded)
        const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
          filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
          prependDir: true,
        })
        await asyncMap(vhds, async path => {
          try {
            await checkVhdChain(handler, path)

            // the base is usable only if one VHD's footer UUID matches it
            const vhd = new Vhd(handler, path)
            await vhd.readHeaderAndFooter()
            found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
          } catch (error) {
            warn('checkBaseVdis', { error })
            // broken VHD: remove it so it does not poison future chains
            await ignoreErrors.call(handler.unlink(path))
          }
        })
      } catch (error) {
        warn('checkBaseVdis', { error })
      }
      if (!found) {
        baseUuidToSrcVdi.delete(baseUuid)
      }
    })
  }

  // Writes the delta export on the remote: one VHD per VDI (chained to its
  // parent when this is a delta), plus a JSON metadata file, then applies
  // the export retention by merging/deleting old backups.
  async run({ timestamp, deltaExport, sizeContainers }) {
    const adapter = this._adapter
    const backup = this._backup
    const settings = this._settings

    const { job, scheduleId, vm } = backup

    const jobId = job.id
    const handler = adapter.handler
    const backupDir = getVmBackupDir(vm.uuid)

    // TODO: clean VM backup directory

    // backups beyond the retention once this new backup is added
    const oldBackups = getOldEntries(
      settings.exportRetention - 1,
      await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
    )

    // FIXME: implement optimized multiple VHDs merging with synthetic
    // delta
    //
    // For the time being, limit the number of deleted backups by run
    // because it can take a very long time and can lead to
    // interrupted backup with broken VHD chain.
    //
    // The old backups will be eventually merged in future runs of the
    // job.
    const { maxMergedDeltasPerRun } = this._settings
    if (oldBackups.length > maxMergedDeltasPerRun) {
      oldBackups.length = maxMergedDeltasPerRun
    }

    const deleteOldBackups = () =>
      Task.run({ name: 'merge' }, async () => {
        let size = 0
        // delete sequentially from newest to oldest to avoid unnecessary merges
        for (let i = oldBackups.length; i-- > 0; ) {
          size += await adapter.deleteDeltaVmBackups([oldBackups[i]])
        }
        return {
          size,
        }
      })

    const basename = formatFilenameDate(timestamp)
    // path on the remote of each VHD, keyed by VDI id; VHDs of a same VDI
    // are grouped by the snapshotted VDI's UUID so chains stay together
    const vhds = mapValues(
      deltaExport.vdis,
      vdi =>
        `vdis/${jobId}/${
          vdi.type === 'suspend'
            ? // doesn't make sense to group by parent for memory because we
              // don't do delta for it
              vdi.uuid
            : vdi.$snapshot_of$uuid
        }/${basename}.vhd`
    )

    const metadataFilename = `${backupDir}/${basename}.json`
    const metadataContent = {
      jobId,
      mode: job.mode,
      scheduleId,
      timestamp,
      vbds: deltaExport.vbds,
      vdis: deltaExport.vdis,
      version: '2.0.0',
      vifs: deltaExport.vifs,
      vhds,
      vm,
      vmSnapshot: this._backup.exportedVm,
    }

    // `deleteFirst` frees space before the transfer, at the cost of briefly
    // holding fewer backups than the configured retention
    const { deleteFirst } = settings
    if (deleteFirst) {
      await deleteOldBackups()
    }

    const { size } = await Task.run({ name: 'transfer' }, async () => {
      await Promise.all(
        map(deltaExport.vdis, async (vdi, id) => {
          const path = `${backupDir}/${vhds[id]}`

          const isDelta = vdi.other_config['xo:base_delta'] !== undefined
          let parentPath
          if (isDelta) {
            // the parent of a delta is the most recent VHD of the chain,
            // i.e. the last one in lexicographic (= date) order
            const vdiDir = dirname(path)
            parentPath = (
              await handler.list(vdiDir, {
                filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
                prependDir: true,
              })
            )
              .sort()
              .pop()

            assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)

            parentPath = parentPath.slice(1) // remove leading slash

            // TODO remove when this has been done before the export
            await checkVhd(handler, parentPath)
          }

          await adapter.outputStream(deltaExport.streams[`${id}.vhd`], path, {
            // no checksum for VHDs, because they will be invalidated by
            // merges and chainings
            checksum: false,
            validator: tmpPath => checkVhd(handler, tmpPath),
          })

          if (isDelta) {
            await chainVhd(handler, parentPath, handler, path)
          }

          // set the correct UUID in the VHD
          const vhd = new Vhd(handler, path)
          await vhd.readHeaderAndFooter()
          vhd.footer.uuid = packUuid(vdi.uuid)
          await vhd.readBlockAllocationTable() // required by writeFooter()
          await vhd.writeFooter()
        })
      )
      return {
        size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
      }
    })
    // the metadata file is written last: its presence marks the backup as
    // complete
    metadataContent.size = size
    await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
      dirMode: backup.config.dirMode,
    })

    if (!deleteFirst) {
      await deleteOldBackups()
    }

    // TODO: run cleanup?
  }
}
|
||||
85
@xen-orchestra/backups/_DisasterRecoveryWriter.js
Normal file
85
@xen-orchestra/backups/_DisasterRecoveryWriter.js
Normal file
@@ -0,0 +1,85 @@
|
||||
const asyncMapSettled = require('@xen-orchestra/async-map').default
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { listReplicatedVms } = require('./_listReplicatedVms')
|
||||
const { Task } = require('./task')
|
||||
|
||||
exports.DisasterRecoveryWriter = class DisasterRecoveryWriter {
  // Writer that imports a full VM export (XVA stream) onto a target SR.
  //
  // - backup: the VmBackup being run
  // - sr: target XAPI SR record
  // - settings: effective settings for this (job, schedule, SR) combination
  constructor(backup, sr, settings) {
    this._backup = backup
    this._settings = settings
    this._sr = sr

    // wrap `run` in a task so its progress and result are reported in logs
    this.run = Task.wrapFn(
      {
        name: 'export',
        data: {
          id: sr.uuid,
          type: 'SR',

          // necessary?
          isFull: true,
        },
      },
      this.run
    )
  }

  // Imports the XVA stream on the SR, tags/renames/neuters the resulting VM
  // and applies the copy retention policy.
  //
  // - timestamp: snapshot time, used in the copied VM's name
  // - sizeContainer: size accumulator attached to the stream (reporting)
  // - stream: XVA export stream of the VM
  async run({ timestamp, sizeContainer, stream }) {
    const sr = this._sr
    const settings = this._settings
    const { job, scheduleId, vm } = this._backup

    const { uuid: srUuid, $xapi: xapi } = sr

    // delete previous interrupted copies
    ignoreErrors.call(
      asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => xapi.VM_destroy(vm.$ref))
    )

    // copies that exceed the retention once this new copy is added
    const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))

    const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
    // `deleteFirst` frees space before the transfer, at the cost of briefly
    // holding fewer copies than the configured retention
    const { deleteFirst } = settings
    if (deleteFirst) {
      await deleteOldBackups()
    }

    let targetVmRef
    await Task.run({ name: 'transfer' }, async () => {
      // the callback customizes the VM while it is being imported;
      // note that its `vm` parameter (the imported VM) shadows the source
      // `vm` from the enclosing scope
      targetVmRef = await xapi.VM_import(stream, sr.$ref, vm =>
        Promise.all([
          vm.add_tags('Disaster Recovery'),
          // the copy must not be restarted by HA on the target pool
          vm.ha_restart_priority !== '' && Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
          vm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
        ])
      )
      return { size: sizeContainer.size }
    })

    const targetVm = await xapi.getRecord('VM', targetVmRef)

    await Promise.all([
      // prevent accidental start of the copy: users should clone it instead
      targetVm.update_blocked_operations(
        'start',
        'Start operation for this vm is blocked, clone it if you want to use it.'
      ),
      targetVm.update_other_config({
        'xo:backup:sr': srUuid,

        // these entries need to be added in case of offline backup
        'xo:backup:datetime': formatDateTime(timestamp),
        'xo:backup:job': job.id,
        'xo:backup:schedule': scheduleId,
        'xo:backup:vm': vm.uuid,
      }),
    ])

    if (!deleteFirst) {
      await deleteOldBackups()
    }
  }
}
|
||||
90
@xen-orchestra/backups/_FullBackupWriter.js
Normal file
90
@xen-orchestra/backups/_FullBackupWriter.js
Normal file
@@ -0,0 +1,90 @@
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { getVmBackupDir } = require('./_getVmBackupDir')
|
||||
const { isValidXva } = require('./isValidXva')
|
||||
const { Task } = require('./task')
|
||||
|
||||
exports.FullBackupWriter = class FullBackupWriter {
  // Writer that stores a full VM export (XVA file) on a remote.
  //
  // - backup: the VmBackup being run
  // - remoteId: id of the target remote
  // - settings: effective settings for this (job, schedule, remote)
  constructor(backup, remoteId, settings) {
    this._backup = backup
    this._remoteId = remoteId
    this._settings = settings

    // wrap `run` in a task so its progress and result are reported in logs
    this.run = Task.wrapFn(
      {
        name: 'export',
        data: {
          id: remoteId,
          type: 'remote',

          // necessary?
          isFull: true,
        },
      },
      this.run
    )
  }

  // Writes the XVA stream and its JSON metadata on the remote, then applies
  // the export retention by deleting old full backups.
  //
  // - timestamp: snapshot time, used in the file names
  // - sizeContainer: size accumulator attached to the stream (reporting)
  // - stream: XVA export stream of the VM
  async run({ timestamp, sizeContainer, stream }) {
    const backup = this._backup
    const remoteId = this._remoteId
    const settings = this._settings

    const { job, scheduleId, vm } = backup

    const adapter = backup.remoteAdapters[remoteId]
    const handler = adapter.handler
    const backupDir = getVmBackupDir(vm.uuid)

    // TODO: clean VM backup directory

    // backups beyond the retention once this new backup is added
    const oldBackups = getOldEntries(
      settings.exportRetention - 1,
      await adapter.listVmBackups(vm.uuid, _ => _.mode === 'full' && _.scheduleId === scheduleId)
    )
    const deleteOldBackups = () => adapter.deleteFullVmBackups(oldBackups)

    const basename = formatFilenameDate(timestamp)

    const dataBasename = basename + '.xva'
    const dataFilename = backupDir + '/' + dataBasename

    const metadataFilename = `${backupDir}/${basename}.json`
    const metadata = {
      jobId: job.id,
      mode: job.mode,
      scheduleId,
      timestamp,
      version: '2.0.0',
      vm,
      vmSnapshot: this._backup.exportedVm,
      xva: './' + dataBasename,
    }

    // `deleteFirst` frees space before the transfer, at the cost of briefly
    // holding fewer backups than the configured retention
    const { deleteFirst } = settings
    if (deleteFirst) {
      await deleteOldBackups()
    }

    await Task.run({ name: 'transfer' }, async () => {
      await adapter.outputStream(stream, dataFilename, {
        validator: tmpPath => {
          // XVA integrity can only be checked on handlers backed by local
          // files (`_getFilePath` present); other handlers skip validation
          if (handler._getFilePath !== undefined) {
            return isValidXva(handler._getFilePath('/' + tmpPath))
          }
        },
      })
      return { size: sizeContainer.size }
    })
    // the metadata file is written last: its presence marks the backup as
    // complete
    metadata.size = sizeContainer.size
    await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
      dirMode: backup.config.dirMode,
    })

    if (!deleteFirst) {
      await deleteOldBackups()
    }

    // TODO: run cleanup?
  }
}
|
||||
74
@xen-orchestra/backups/_PoolMetadataBackup.js
Normal file
74
@xen-orchestra/backups/_PoolMetadataBackup.js
Normal file
@@ -0,0 +1,74 @@
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { Task } = require('./task')
|
||||
|
||||
// XAPI HTTP resource from/to which the pool database dump is transferred
const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP

exports.PoolMetadataBackup = class PoolMetadataBackup {
  // Backs up the XAPI database dump of a pool onto one or more remotes.
  constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
    this._config = config
    this._job = job
    this._pool = pool
    this._remoteAdapters = remoteAdapters
    this._schedule = schedule
    this._settings = settings
  }

  // Returns a stream of the pool's database dump fetched from XAPI.
  _exportPoolMetadata() {
    const xapi = this._pool.$xapi
    return xapi.getResource(PATH_DB_DUMP, {
      task: xapi.createTask('Export pool metadata'),
    })
  }

  // Exports the pool metadata once and writes it (plus a JSON descriptor)
  // to every configured remote in parallel, then prunes old metadata
  // backups per the retention setting. Per-remote errors are caught so one
  // failing remote does not abort the others.
  async run() {
    const timestamp = Date.now()

    const { _job: job, _schedule: schedule, _pool: pool } = this
    // layout on the remote: <DIR_XO_POOL_METADATA_BACKUPS>/<scheduleId>/<poolId>/<date>/
    const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${pool.$id}`
    const dir = `${poolDir}/${formatFilenameDate(timestamp)}`

    const stream = await this._exportPoolMetadata()
    const fileName = `${dir}/data`

    const metadata = JSON.stringify(
      {
        jobId: job.id,
        jobName: job.name,
        pool,
        poolMaster: pool.$master,
        scheduleId: schedule.id,
        scheduleName: schedule.name,
        timestamp,
      },
      null,
      2
    )
    const metaDataFileName = `${dir}/metadata.json`

    await asyncMap(
      Object.entries(this._remoteAdapters),
      ([remoteId, adapter]) =>
        Task.run(
          {
            name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
            data: {
              id: remoteId,
              type: 'remote',
            },
          },
          async () => {
            // forkStreamUnpipe should be used in a sync way, do not wait for a promise before using it
            await adapter.outputStream(forkStreamUnpipe(stream), fileName, { checksum: false })
            await adapter.handler.outputFile(metaDataFileName, metadata, {
              dirMode: this._config.dirMode,
            })
            await adapter.deleteOldMetadataBackups(poolDir, this._settings.retentionPoolMetadata)
          }
        ).catch(() => {}) // errors are handled by logs
    )
  }
}
|
||||
350
@xen-orchestra/backups/_VmBackup.js
Normal file
350
@xen-orchestra/backups/_VmBackup.js
Normal file
@@ -0,0 +1,350 @@
|
||||
const findLast = require('lodash/findLast')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const keyBy = require('lodash/keyBy')
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { ContinuousReplicationWriter } = require('./_ContinuousReplicationWriter')
|
||||
const { DeltaBackupWriter } = require('./_DeltaBackupWriter')
|
||||
const { DisasterRecoveryWriter } = require('./_DisasterRecoveryWriter')
|
||||
const { exportDeltaVm } = require('./_deltaVm')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
|
||||
const { FullBackupWriter } = require('./_FullBackupWriter')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { Task } = require('./task')
|
||||
const { watchStreamSize } = require('./_watchStreamSize')
|
||||
|
||||
const { debug, warn } = createLogger('xo:proxy:backups:VmBackup')
|
||||
|
||||
// Creates a shallow fork of a delta VM export: the returned object inherits
// every property of the original export through its prototype, but carries
// its own `streams` map in which each stream has been forked so that several
// writers can consume the data independently.
const forkDeltaExport = deltaExport => {
  const forkedStreams = mapValues(deltaExport.streams, forkStreamUnpipe)
  return Object.create(deltaExport, {
    streams: { value: forkedStreams },
  })
}
|
||||
|
||||
exports.VmBackup = class VmBackup {
  // Orchestrates the backup of a single VM: snapshot, export (full or
  // delta) through a set of writers (remotes and/or SRs), and cleanup of
  // job-related snapshots.
  constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.remotes = remotes
    this.scheduleId = schedule.id
    // set when the snapshot (or offline export) is taken
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    // special tags on the VM override snapshot-related settings
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = []
      this._writers = writers

      // delta mode exports VHD deltas, full mode exports XVAs; each mode
      // has a remote writer and an SR (replication) writer
      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, ContinuousReplicationWriter]
        : [FullBackupWriter, DisasterRecoveryWriter]

      const allSettings = job.settings

      // one writer per remote/SR whose effective retention is non-zero
      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.push(new BackupWriter(this, remoteId, targetSettings))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.push(new ReplicationWriter(this, sr, targetSettings))
        }
      })
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  // Takes the snapshot (or checkpoint) that will be exported, tags it with
  // the job metadata and records `this.exportedVm` / `this.timestamp`.
  // When no snapshot is needed (full mode, halted VM, no snapshot
  // retention), the VM itself is exported.
  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot = this._isDelta || vm.power_state === 'Running' || settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        // checkpoint also captures the memory (for `xo-memory-backup` VMs)
        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot'](
          this._getSnapshotNameLabel(vm)
        )
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  // Exports the delta between `this._baseVm` and the snapshot, and runs all
  // writers in parallel on forked copies of the export streams. Individual
  // writer failures are logged but do not abort the other writers.
  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired: this._fullVdisRequired,
    })
    const sizeContainers = mapValues(deltaExport.streams, watchStreamSize)

    const timestamp = Date.now()

    await asyncMap(this._writers, async writer => {
      try {
        await writer.run({
          // each writer gets its own fork of the streams
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        })
      } catch (error) {
        warn('copy failure', {
          error,
          target: writer.target,
          vm: this.vm,
        })
      }
    })

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      // track the length of the delta chain to decide when a new full is due
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      // mark the snapshot as successfully exported: it can serve as a base
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  // Exports the VM as an XVA stream and runs all writers in parallel on
  // forked copies of the stream. Individual writer failures are logged but
  // do not abort the other writers.
  async _copyFull() {
    const { compression } = this.job
    const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
      compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
      useSnapshot: false,
    })
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await asyncMap(this._writers, async writer => {
      try {
        await writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        })
      } catch (error) {
        warn('copy failure', {
          error,
          target: writer.target,
          vm: this.vm,
        })
      }
    })

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  // Refreshes `this._jobSnapshots`: the VM's snapshots belonging to this
  // job, sorted by ascending backup datetime.
  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  // Destroys the snapshots of this schedule that exceed the snapshot
  // retention, except the current base VM (still needed for deltas).
  async _removeUnusedSnapshots() {
    // TODO: handle all schedules (no longer existing schedules default to 0 retention)

    const { scheduleId } = this
    const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)

    const baseVmRef = this._baseVm?.$ref
    const xapi = this._xapi
    await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
      if ($ref !== baseVmRef) {
        return xapi.VM_destroy($ref)
      }
    })
  }

  // Selects the base VM of a delta export (the most recent exported job
  // snapshot), unless a full export is due (`fullInterval` reached or no
  // writer holds the base VDIs). Fills `this._baseVm` and
  // `this._fullVdisRequired` accordingly.
  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      return
    }

    // force a full export when the delta chain would grow past fullInterval
    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    // map the base snapshot's VDI UUIDs to the current VM's VDIs
    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
      }
    })

    // let each writer remove the base VDIs it cannot use as deltas
    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    const writers = this._writers
    for (let i = 0, n = writers.length; presentBaseVdis.size !== 0 && i < n; ++i) {
      await writers[i].checkBaseVdis(presentBaseVdis, baseVm)
    }

    if (presentBaseVdis.size === 0) {
      return
    }

    // VDIs missing from at least one writer must be fully exported
    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (!presentBaseVdis.has(baseUuid)) {
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }

  // Runs the full backup workflow: select base (delta mode), clean
  // metadata, prune snapshots, optionally shut the VM down (offline
  // backup/snapshot), snapshot, copy to all writers, then restart the VM
  // and prune snapshots again.
  async run() {
    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { _settings: settings, vm } = this
    const isRunning = vm.power_state === 'Running'
    // 'backup': restart only after the whole backup; 'snapshot': restart as
    // soon as the snapshot is taken; false: VM was not running
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.length !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        // best effort: the VM may already have been restarted above
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
  }
}
|
||||
61
@xen-orchestra/backups/_XoMetadataBackup.js
Normal file
61
@xen-orchestra/backups/_XoMetadataBackup.js
Normal file
@@ -0,0 +1,61 @@
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { Task } = require('./task')
|
||||
|
||||
exports.XoMetadataBackup = class XoMetadataBackup {
|
||||
constructor({ config, job, remoteAdapters, schedule, settings }) {
|
||||
this._config = config
|
||||
this._job = job
|
||||
this._remoteAdapters = remoteAdapters
|
||||
this._schedule = schedule
|
||||
this._settings = settings
|
||||
}
|
||||
|
||||
async run() {
|
||||
const timestamp = Date.now()
|
||||
|
||||
const { _job: job, _schedule: schedule } = this
|
||||
const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
|
||||
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
|
||||
|
||||
const data = JSON.stringify(job.xoMetadata, null, 2)
|
||||
const fileName = `${dir}/data.json`
|
||||
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
await asyncMap(
|
||||
Object.entries(this._remoteAdapters),
|
||||
([remoteId, adapter]) =>
|
||||
Task.run(
|
||||
{
|
||||
name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
|
||||
data: {
|
||||
id: remoteId,
|
||||
type: 'remote',
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
const handler = adapter.handler
|
||||
const dirMode = this._config.dirMode
|
||||
await handler.outputFile(fileName, data, { dirMode })
|
||||
await handler.outputFile(metaDataFileName, metadata, {
|
||||
dirMode,
|
||||
})
|
||||
await adapter.deleteOldMetadataBackups(scheduleDir, this._settings.retentionXoMetadata)
|
||||
}
|
||||
).catch(() => {}) // errors are handled by logs
|
||||
)
|
||||
}
|
||||
}
|
||||
20
@xen-orchestra/backups/_cancelableMap.js
Normal file
20
@xen-orchestra/backups/_cancelableMap.js
Normal file
@@ -0,0 +1,20 @@
|
||||
const cancelable = require('promise-toolbox/cancelable')
|
||||
const CancelToken = require('promise-toolbox/CancelToken')
|
||||
|
||||
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
//
// If any of the executions fails, the cancel token will be triggered and the
// first reason will be rejected.
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
  // Derive a token chained to the caller's token so that a local failure can
  // also cancel the callbacks still in flight.
  const { cancel, token } = CancelToken.source([$cancelToken])
  try {
    return await Promise.all(
      Array.from(iterable, function (item) {
        // `callback.call(this, …)` forwards `this` untouched; no `thisArg` is
        // passed to `Array.from`, so it is normally `undefined`.
        return callback.call(this, token, item)
      })
    )
  } catch (error) {
    // Trigger cancellation of the remaining callbacks, then reject with the
    // first failure.
    await cancel()
    throw error
  }
})
|
||||
5
@xen-orchestra/backups/_checkVhd.js
Normal file
5
@xen-orchestra/backups/_checkVhd.js
Normal file
@@ -0,0 +1,5 @@
|
||||
const Vhd = require('vhd-lib').default
|
||||
|
||||
exports.checkVhd = async function checkVhd(handler, path) {
|
||||
await new Vhd(handler, path).readHeaderAndFooter()
|
||||
}
|
||||
342
@xen-orchestra/backups/_deltaVm.js
Normal file
342
@xen-orchestra/backups/_deltaVm.js
Normal file
@@ -0,0 +1,342 @@
|
||||
const compareVersions = require('compare-versions')
|
||||
const defer = require('golike-defer').default
|
||||
const find = require('lodash/find')
|
||||
const groupBy = require('lodash/groupBy')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const omit = require('lodash/omit')
|
||||
const { CancelToken } = require('promise-toolbox')
|
||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
|
||||
const { asyncMap } = require('./asyncMap')
|
||||
const { cancelableMap } = require('./_cancelableMap')
|
||||
|
||||
// `other_config` key storing, on an exported VM/VDI record, the UUID of the
// base snapshot this delta was computed against.
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA

// `other_config` key storing, on an imported VM/VDI, the UUID of the source
// object it was copied from (used to match bases on later imports).
const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC
|
||||
|
||||
// Normalizes a value into an array: `undefined` → `[]`, an array is returned
// as-is, anything else is wrapped in a single-element array.
const ensureArray = value => {
  if (value === undefined) {
    return []
  }
  return Array.isArray(value) ? value : [value]
}
|
||||
|
||||
// Exports a VM as a "delta" object: plain-record copies of the VM, its VBDs,
// VDIs and VIFs (version '1.1.0' format), plus one VHD export stream per
// disk, exposed through a non-enumerable `streams` property.
//
// When `baseVm` is provided, VDIs that are snapshots of the same disks are
// exported as deltas against it (unless listed in `fullVdisRequired`), and
// base references are recorded via TAG_BASE_DELTA unless `disableBaseTags`.
exports.exportDeltaVm = async function exportDeltaVm(
  vm,
  baseVm,
  {
    cancelToken = CancelToken.none,

    // Sets of UUIDs of VDIs that must be exported as full.
    fullVdisRequired = new Set(),

    disableBaseTags = false,
  } = {}
) {
  // refs of VM's VDIs → base's VDIs.
  const baseVdis = {}
  baseVm &&
    baseVm.$VBDs.forEach(vbd => {
      let vdi, snapshotOf
      // Index base VDIs by the ref of the VDI they are a snapshot of, so the
      // current VM's snapshots of the same disk can find them below.
      if ((vdi = vbd.$VDI) && (snapshotOf = vdi.$snapshot_of) && !fullVdisRequired.has(snapshotOf.uuid)) {
        baseVdis[vdi.snapshot_of] = vdi
      }
    })

  const streams = {}
  const vdis = {}
  const vbds = {}
  await cancelableMap(cancelToken, vm.$VBDs, async (cancelToken, vbd) => {
    let vdi
    if (vbd.type !== 'Disk' || !(vdi = vbd.$VDI)) {
      // Ignore this VBD.
      return
    }

    // If the VDI name start with `[NOBAK]`, do not export it.
    if (vdi.name_label.startsWith('[NOBAK]')) {
      // FIXME: find a way to not create the VDI snapshot in the
      // first time.
      //
      // The snapshot must not exist otherwise it could break the
      // next export.
      ignoreErrors.call(vdi.$destroy())
      return
    }

    vbds[vbd.$ref] = vbd

    const vdiRef = vdi.$ref
    if (vdiRef in vdis) {
      // This VDI has already been managed.
      return
    }

    // Look for a snapshot of this vdi in the base VM.
    const baseVdi = baseVdis[vdi.snapshot_of]

    // Plain copy of the VDI record, augmented with denormalized fields
    // ($snapshot_of$uuid, $SR$uuid) so the import side does not need the
    // source XAPI connection.
    vdis[vdiRef] = {
      ...vdi,
      other_config: {
        ...vdi.other_config,
        [TAG_BASE_DELTA]: baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
      },
      $snapshot_of$uuid: vdi.$snapshot_of?.uuid,
      $SR$uuid: vdi.$SR.uuid,
    }

    // Delta export when a base VDI was found (baseRef), full export otherwise.
    streams[`${vdiRef}.vhd`] = await vdi.$exportContent({
      baseRef: baseVdi?.$ref,
      cancelToken,
      format: 'vhd',
    })
  })

  // The suspend VDI (memory image of a suspended VM) is always exported in
  // full — it is never part of the VBD list above.
  const suspendVdi = vm.$suspend_VDI
  if (suspendVdi !== undefined) {
    const vdiRef = suspendVdi.$ref
    vdis[vdiRef] = {
      ...suspendVdi,
      $SR$uuid: suspendVdi.$SR.uuid,
    }
    streams[`${vdiRef}.vhd`] = await suspendVdi.$exportContent({
      cancelToken,
      format: 'vhd',
    })
  }

  // Plain copies of the VIF records with denormalized network information
  // ($network$uuid/name_label/VLAN) used to pick a target network on import.
  const vifs = {}
  vm.$VIFs.forEach(vif => {
    const network = vif.$network
    vifs[vif.$ref] = {
      ...vif,
      $network$uuid: network.uuid,
      $network$name_label: network.name_label,
      $network$VLAN: network.$PIFs[0]?.VLAN,
    }
  })

  // `streams` is attached as a non-enumerable property so that JSON
  // serialization of the delta (e.g. for the metadata file) skips it.
  return Object.defineProperty(
    {
      version: '1.1.0',
      vbds,
      vdis,
      vifs,
      vm: {
        ...vm,
        other_config:
          baseVm && !disableBaseTags
            ? {
                ...vm.other_config,
                [TAG_BASE_DELTA]: baseVm.uuid,
              }
            : omit(vm.other_config, TAG_BASE_DELTA),
      },
    },
    'streams',
    {
      configurable: true,
      value: streams,
      writable: true,
    }
  )
}
|
||||
|
||||
// Imports a delta VM object (produced by `exportDeltaVm`) into the SR `sr`.
//
// Creates the VM, its VDIs (cloning the base VDIs for delta disks), VBDs and
// VIFs, then imports the VHD streams. All created objects are destroyed via
// `$defer.onFailure` if the import fails (golike-defer). Returns the new VM
// ref; the VM is left with `start` blocked and a '[Importing…] ' name prefix
// until the final rename — NOTE(review): the blocked operation and name
// prefix appear to be cleared by the caller, verify against call sites.
exports.importDeltaVm = defer(async function importDeltaVm(
  $defer,
  deltaVm,
  sr,
  { cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {} } = {}
) {
  const { version } = deltaVm
  if (compareVersions(version, '1.0.0') < 0) {
    throw new Error(`Unsupported delta backup version: ${version}`)
  }

  const vmRecord = deltaVm.vm
  const xapi = sr.$xapi

  // Locate the local copy of the base VM (matched through TAG_COPY_SRC) when
  // the delta was exported against one.
  let baseVm
  if (detectBase) {
    const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
    if (remoteBaseVmUuid) {
      baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)

      if (!baseVm) {
        throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
      }
    }
  }

  // Base VDIs indexed by VBD.VDI ref.
  const baseVdis = {}
  baseVm &&
    baseVm.$VBDs.forEach(vbd => {
      const vdi = vbd.$VDI
      if (vdi !== undefined) {
        baseVdis[vbd.VDI] = vbd.$VDI
      }
    })
  const vdiRecords = deltaVm.vdis

  // 0. Create suspend_VDI
  let suspendVdi
  if (vmRecord.power_state === 'Suspended') {
    const vdi = vdiRecords[vmRecord.suspend_VDI]
    suspendVdi = await xapi.getRecord(
      'VDI',
      await xapi.VDI_create({
        ...vdi,
        other_config: {
          ...vdi.other_config,
          [TAG_BASE_DELTA]: undefined,
          [TAG_COPY_SRC]: vdi.uuid,
        },
        sr: mapVdisSrs[vdi.uuid] ?? sr.$ref,
      })
    )
    $defer.onFailure(() => suspendVdi.$destroy())
  }

  // 1. Create the VM.
  const vmRef = await xapi.VM_create(
    {
      ...vmRecord,
      affinity: undefined,
      blocked_operations: {
        ...vmRecord.blocked_operations,
        start: 'Importing…',
      },
      ha_always_run: false,
      is_a_template: false,
      name_label: '[Importing…] ' + vmRecord.name_label,
      other_config: {
        ...vmRecord.other_config,
        [TAG_COPY_SRC]: vmRecord.uuid,
      },
    },
    {
      suspend_VDI: suspendVdi?.$ref,
    }
  )
  $defer.onFailure.call(xapi, 'VM_destroy', vmRef)

  // 2. Delete all VBDs which may have been created by the import.
  await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))

  // 3. Create VDIs & VBDs.
  const vbdRecords = deltaVm.vbds
  const vbds = groupBy(vbdRecords, 'VDI')
  const newVdis = {}
  await asyncMap(Object.keys(vdiRecords), async vdiRef => {
    const vdi = vdiRecords[vdiRef]
    let newVdi

    const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
    if (remoteBaseVdiUuid) {
      // Delta disk: clone the matching base VDI, the VHD stream imported
      // below only contains the changed blocks.
      const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
      if (!baseVdi) {
        throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
      }

      newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
      $defer.onFailure(() => newVdi.$destroy())

      await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
    } else if (vdiRef === vmRecord.suspend_VDI) {
      // suspendVDI has already created
      newVdi = suspendVdi
    } else {
      // Full disk: create an empty VDI, optionally on a remapped SR.
      newVdi = await xapi.getRecord(
        'VDI',
        await xapi.VDI_create({
          ...vdi,
          other_config: {
            ...vdi.other_config,
            [TAG_BASE_DELTA]: undefined,
            [TAG_COPY_SRC]: vdi.uuid,
          },
          SR: mapVdisSrs[vdi.uuid] ?? sr.$ref,
        })
      )
      $defer.onFailure(() => newVdi.$destroy())
    }

    // Recreate the VBDs attaching this VDI to the new VM.
    const vdiVbds = vbds[vdiRef]
    if (vdiVbds !== undefined) {
      await asyncMap(Object.values(vdiVbds), vbd =>
        xapi.VBD_create({
          ...vbd,
          VDI: newVdi.$ref,
          VM: vmRef,
        })
      )
    }

    newVdis[vdiRef] = newVdi
  })

  // Index the pool's networks by VLAN then name label, keeping the last seen
  // one as a default, to map exported VIFs onto local networks.
  const networksByNameLabelByVlan = {}
  let defaultNetwork
  Object.values(xapi.objects.all).forEach(object => {
    if (object.$type === 'network') {
      const pif = object.$PIFs[0]
      if (pif === undefined) {
        // ignore network
        return
      }
      const vlan = pif.VLAN
      const networksByNameLabel = networksByNameLabelByVlan[vlan] || (networksByNameLabelByVlan[vlan] = {})
      defaultNetwork = networksByNameLabel[object.name_label] = object
    }
  })

  const { streams } = deltaVm

  await Promise.all([
    // Import VDI contents.
    cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
      for (let stream of ensureArray(streams[`${id}.vhd`])) {
        // A stream entry may be a factory function (lazy stream creation).
        if (typeof stream === 'function') {
          stream = await stream()
        }
        // XAPI needs a known content length; compute it when missing.
        if (stream.length === undefined) {
          stream = await createVhdStreamWithLength(stream)
        }
        await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
      }
    }),

    // Wait for VDI export tasks (if any) termination.
    Promise.all(Object.values(streams).map(stream => stream.task)),

    // Create VIFs.
    asyncMap(Object.values(deltaVm.vifs), vif => {
      // Prefer an exact UUID match, then same-VLAN networks (same name label
      // first, otherwise the first one), then the pool-wide default.
      let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)

      if (network === undefined) {
        const { $network$VLAN: vlan = -1 } = vif
        const networksByNameLabel = networksByNameLabelByVlan[vlan]
        if (networksByNameLabel !== undefined) {
          network = networksByNameLabel[vif.$network$name_label]
          if (network === undefined) {
            network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
          }
        } else {
          network = defaultNetwork
        }
      }

      // If no network could be found at all, the VIF is silently skipped.
      if (network) {
        return xapi.VIF_create({
          ...vif,
          network: network.$ref,
          VM: vmRef,
        })
      }
    }),
  ])

  // Restore HA and the original name label (the import prefix is dropped).
  await Promise.all([
    deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
    xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
  ])

  return vmRef
})
|
||||
@@ -1,4 +1,4 @@
|
||||
function extractIdsFromSimplePattern(pattern) {
|
||||
exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
|
||||
if (pattern === undefined) {
|
||||
return []
|
||||
}
|
||||
@@ -27,4 +27,3 @@ function extractIdsFromSimplePattern(pattern) {
|
||||
|
||||
throw new Error('invalid pattern')
|
||||
}
|
||||
exports.extractIdsFromSimplePattern = extractIdsFromSimplePattern
|
||||
28
@xen-orchestra/backups/_forkStreamUnpipe.js
Normal file
28
@xen-orchestra/backups/_forkStreamUnpipe.js
Normal file
@@ -0,0 +1,28 @@
|
||||
const eos = require('end-of-stream')
|
||||
const { PassThrough } = require('stream')
|
||||
|
||||
// create a new readable stream from an existing one which may be piped later
|
||||
//
|
||||
// in case of error in the new readable stream, it will simply be unpiped
|
||||
// from the original one
|
||||
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
|
||||
const { forks = 0 } = stream
|
||||
stream.forks = forks + 1
|
||||
|
||||
const proxy = new PassThrough()
|
||||
stream.pipe(proxy)
|
||||
eos(stream, error => {
|
||||
if (error !== undefined) {
|
||||
proxy.destroy(error)
|
||||
}
|
||||
})
|
||||
eos(proxy, _ => {
|
||||
stream.forks--
|
||||
stream.unpipe(proxy)
|
||||
|
||||
if (stream.forks === 0) {
|
||||
stream.destroy(new Error('no more consumers for this stream'))
|
||||
}
|
||||
})
|
||||
return proxy
|
||||
}
|
||||
4
@xen-orchestra/backups/_getOldEntries.js
Normal file
4
@xen-orchestra/backups/_getOldEntries.js
Normal file
@@ -0,0 +1,4 @@
|
||||
// returns all entries but the last retention-th
|
||||
exports.getOldEntries = function getOldEntries(retention, entries) {
|
||||
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
|
||||
}
|
||||
20
@xen-orchestra/backups/_getTmpDir.js
Normal file
20
@xen-orchestra/backups/_getTmpDir.js
Normal file
@@ -0,0 +1,20 @@
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const { join } = require('path')
|
||||
const { mkdir, rmdir } = require('fs-extra')
|
||||
const { tmpdir } = require('os')
|
||||
|
||||
// Maximum retry index: the loop below tries `MAX_ATTEMPTS + 1` times in total
// (it throws on the attempt where `i === MAX_ATTEMPTS`).
const MAX_ATTEMPTS = 3

// Creates a unique temporary directory and returns it wrapped in a
// `Disposable`: the value is the directory path, the disposer removes it.
//
// NOTE(review): the disposer uses `rmdir`, which only removes an empty
// directory — callers are presumably expected to clean up their own files
// before disposal; verify against call sites.
exports.getTmpDir = async function getTmpDir() {
  for (let i = 0; true; ++i) {
    // Random base-36 name; name collisions simply fail `mkdir` and retry.
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
      await mkdir(path)
      return new Disposable(path, () => rmdir(path))
    } catch (error) {
      if (i === MAX_ATTEMPTS) {
        throw error
      }
    }
  }
}
|
||||
6
@xen-orchestra/backups/_getVmBackupDir.js
Normal file
6
@xen-orchestra/backups/_getVmBackupDir.js
Normal file
@@ -0,0 +1,6 @@
|
||||
// Root directory, on a remote, under which all VM backups are stored.
const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

// Returns the remote-relative directory containing the backups of the VM
// identified by `uuid`.
exports.getVmBackupDir = function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
|
||||
52
@xen-orchestra/backups/_listPartitions.js
Normal file
52
@xen-orchestra/backups/_listPartitions.js
Normal file
@@ -0,0 +1,52 @@
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createParser } = require('parse-pairs')
|
||||
const { execFile } = require('child_process')
|
||||
|
||||
// NOTE(review): the 'xo:proxy:api' namespace looks inherited from the proxy
// package — confirm whether it should be a backups-specific namespace.
const { debug } = createLogger('xo:proxy:api')

// MBR partition type codes to skip when listing partitions: container/hidden
// types per the linked node-mbr table, plus Linux swap.
const IGNORED_PARTITION_TYPES = {
  // https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
  0x05: true,
  0x0f: true,
  0x15: true,
  0x5e: true,
  0x5f: true,
  0x85: true,
  0x91: true,
  0x9b: true,
  0xc5: true,
  0xcf: true,
  0xd5: true,

  0x82: true, // swap
}

// Linux LVM partition type, exported so callers can special-case LVM.
const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE

// Parses one `partx --pairs` output line (KEY="value" pairs) into an object:
// `UUID` is renamed to `id`, other keys are lowercased, and the numeric
// fields (start, size, type) are coerced to numbers.
const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
  valueTransform: (value, key) => (key === 'start' || key === 'size' || key === 'type' ? +value : value),
})
|
||||
|
||||
// returns an empty array in case of a non-partitioned disk
|
||||
exports.listPartitions = async function listPartitions(devicePath) {
|
||||
const parts = await fromCallback(execFile, 'partx', [
|
||||
'--bytes',
|
||||
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
||||
'--pairs',
|
||||
devicePath,
|
||||
]).catch(error => {
|
||||
// partx returns 1 since v2.33 when failing to read partitions.
|
||||
//
|
||||
// Prior versions are correctly handled by the nominal case.
|
||||
debug('listPartitions', { error })
|
||||
return ''
|
||||
})
|
||||
|
||||
return parts
|
||||
.split(/\r?\n/)
|
||||
.map(parsePartxLine)
|
||||
.filter(({ type }) => type != null && !(type in IGNORED_PARTITION_TYPES))
|
||||
}
|
||||
30
@xen-orchestra/backups/_listReplicatedVms.js
Normal file
30
@xen-orchestra/backups/_listReplicatedVms.js
Normal file
@@ -0,0 +1,30 @@
|
||||
// Returns the backup datetime of a replicated VM: the explicit
// `xo:backup:datetime` other_config entry when present, otherwise the
// datetime embedded in the name label (last 17 characters minus the closing
// parenthesis, for VMs replicated before the entry existed).
const getReplicatedVmDatetime = vm => {
  const datetime = vm.other_config['xo:backup:datetime']
  if (datetime !== undefined) {
    return datetime
  }
  return vm.name_label.slice(-17, -1)
}

// Sort comparator ordering replicated VMs by ascending backup datetime.
const compareReplicatedVmDatetime = (a, b) => {
  return getReplicatedVmDatetime(a) < getReplicatedVmDatetime(b) ? -1 : 1
}
|
||||
|
||||
exports.listReplicatedVms = function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
|
||||
const { all } = xapi.objects
|
||||
const vms = {}
|
||||
for (const key in all) {
|
||||
const object = all[key]
|
||||
const oc = object.other_config
|
||||
if (
|
||||
object.$type === 'VM' &&
|
||||
!object.is_a_snapshot &&
|
||||
!object.is_a_template &&
|
||||
'start' in object.blocked_operations &&
|
||||
(oc['xo:backup:job'] === scheduleOrJobId || oc['xo:backup:schedule'] === scheduleOrJobId) &&
|
||||
oc['xo:backup:sr'] === srUuid &&
|
||||
(oc['xo:backup:vm'] === vmUuid ||
|
||||
// 2018-03-28, JFT: to catch VMs replicated before this fix
|
||||
oc['xo:backup:vm'] === undefined)
|
||||
) {
|
||||
vms[object.$id] = object
|
||||
}
|
||||
}
|
||||
|
||||
return Object.values(vms).sort(compareReplicatedVmDatetime)
|
||||
}
|
||||
29
@xen-orchestra/backups/_lvm.js
Normal file
29
@xen-orchestra/backups/_lvm.js
Normal file
@@ -0,0 +1,29 @@
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const { createParser } = require('parse-pairs')
|
||||
const { execFile } = require('child_process')
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// `--nameprefixes` makes the LVM tools emit `LVM2_<FIELD>='<value>'` pairs:
// strip the `LVM2_` prefix (5 characters) and lowercase the field name.
const parse = createParser({
  keyTransform: key => key.slice(5).toLowerCase(),
})
// Builds an async wrapper around an LVM reporting command (`lvs`, `pvs`).
//
// `fields` may be a single field name (each output line is mapped to that
// field's value) or an array of names (each line is mapped to a key/value
// object). Remaining `args` are passed through to the command (e.g. a VG or
// device path). Sizes are reported in bytes (`--units b`, `--nosuffix`).
const makeFunction = command => async (fields, ...args) => {
  const info = await fromCallback(execFile, command, [
    '--noheading',
    '--nosuffix',
    '--nameprefixes',
    '--unbuffered',
    '--units',
    'b',
    '-o',
    String(fields),
    ...args,
  ])
  return info
    .trim()
    .split(/\r?\n/)
    .map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')
|
||||
5
@xen-orchestra/backups/_packUuid.js
Normal file
5
@xen-orchestra/backups/_packUuid.js
Normal file
@@ -0,0 +1,5 @@
|
||||
const PARSE_UUID_RE = /-/g
|
||||
|
||||
exports.packUuid = function packUuid(uuid) {
|
||||
return Buffer.from(uuid.replace(PARSE_UUID_RE, ''), 'hex')
|
||||
}
|
||||
46
@xen-orchestra/backups/_syncThenable.js
Normal file
46
@xen-orchestra/backups/_syncThenable.js
Normal file
@@ -0,0 +1,46 @@
|
||||
// `then` implementation installed on fulfilled instances: only the success
// callback is invoked; without a callback the instance is returned as-is.
function fulfilledThen(cb) {
  if (typeof cb !== 'function') {
    return this
  }
  return SyncThenable.fromFunction(cb, this.value)
}

// `then` implementation installed on rejected instances: only the failure
// callback (second argument) is invoked.
function rejectedThen(_, cb) {
  if (typeof cb !== 'function') {
    return this
  }
  return SyncThenable.fromFunction(cb, this.value)
}

// A minimal synchronous thenable: chains callbacks immediately instead of on
// a microtask, so purely synchronous pipelines stay synchronous while real
// thenables (promises) pass through untouched.
class SyncThenable {
  // Wraps a plain value in a fulfilled SyncThenable; existing thenables are
  // returned unchanged.
  static resolve(value) {
    const isThenable = value != null && typeof value.then === 'function'
    return isThenable ? value : new this(false, value)
  }

  // Runs `fn(...args)` and captures its outcome as a resolved or rejected
  // SyncThenable.
  static fromFunction(fn, ...args) {
    try {
      return this.resolve(fn(...args))
    } catch (error) {
      return this.reject(error)
    }
  }

  static reject(reason) {
    return new this(true, reason)
  }

  // unwrap if it's a SyncThenable
  static tryUnwrap(value) {
    if (!(value instanceof this)) {
      return value
    }
    if (value.then === rejectedThen) {
      throw value.value
    }
    return value.value
  }

  constructor(rejected, value) {
    // The rejection state is encoded by which `then` implementation is
    // installed on the instance.
    this.then = rejected ? rejectedThen : fulfilledThen
    this.value = value
  }
}
|
||||
// CommonJS export of the synchronous thenable implementation.
exports.SyncThenable = SyncThenable
|
||||
@@ -1,11 +1,8 @@
|
||||
exports.watchStreamSize = stream => {
|
||||
exports.watchStreamSize = function watchStreamSize(stream) {
|
||||
const container = { size: 0 }
|
||||
const isPaused = stream.isPaused()
|
||||
stream.on('data', data => {
|
||||
container.size += data.length
|
||||
})
|
||||
if (isPaused) {
|
||||
stream.pause()
|
||||
}
|
||||
stream.pause()
|
||||
return container
|
||||
}
|
||||
6
@xen-orchestra/backups/asyncMap.js
Normal file
6
@xen-orchestra/backups/asyncMap.js
Normal file
@@ -0,0 +1,6 @@
|
||||
// Similar to Promise.all + Array#map but supports all iterables and does not trigger ESLint array-callback-return
|
||||
//
|
||||
// WARNING: Does not handle plain objects
|
||||
exports.asyncMap = function asyncMap(arrayLike, mapFn, thisArg) {
|
||||
return Promise.all(Array.from(arrayLike, mapFn, thisArg))
|
||||
}
|
||||
28
@xen-orchestra/backups/formatVmBackup.js
Normal file
28
@xen-orchestra/backups/formatVmBackup.js
Normal file
@@ -0,0 +1,28 @@
|
||||
const { dirname } = require('path')
|
||||
|
||||
exports.formatVmBackup = backup => {
|
||||
return {
|
||||
disks:
|
||||
backup.vhds === undefined
|
||||
? []
|
||||
: Object.keys(backup.vhds).map(vdiId => {
|
||||
const vdi = backup.vdis[vdiId]
|
||||
return {
|
||||
id: `${dirname(backup._filename)}/${backup.vhds[vdiId]}`,
|
||||
name: vdi.name_label,
|
||||
uuid: vdi.uuid,
|
||||
}
|
||||
}),
|
||||
|
||||
id: backup.id,
|
||||
jobId: backup.jobId,
|
||||
mode: backup.mode,
|
||||
scheduleId: backup.scheduleId,
|
||||
size: backup.size,
|
||||
timestamp: backup.timestamp,
|
||||
vm: {
|
||||
name_description: backup.vm.name_description,
|
||||
name_label: backup.vm.name_label,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
// returns all entries but the last retention-th
|
||||
exports.getOldEntries = (retention, entries) =>
|
||||
entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
|
||||
@@ -8,16 +8,32 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.1.1",
|
||||
"version": "0.4.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=14.5"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/multi-key-map": "^0.1.0",
|
||||
"@vates/disposable": "^0.0.0",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/template": "^0.1.0",
|
||||
"@xen-orchestra/xapi": "^0.4.1",
|
||||
"compare-versions": "^3.6.0",
|
||||
"d3-time-format": "^3.0.0",
|
||||
"fs-extra": "^9.0.0"
|
||||
"end-of-stream": "^1.4.4",
|
||||
"ensure-array": "^1.0.0",
|
||||
"fs-extra": "^9.0.0",
|
||||
"golike-defer": "^0.4.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.20",
|
||||
"node-zone": "^0.4.0",
|
||||
"parse-pairs": "^1.1.0",
|
||||
"promise-toolbox": "^0.16.0",
|
||||
"vhd-lib": "^1.0.0"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
|
||||
174
@xen-orchestra/backups/task.js
Normal file
174
@xen-orchestra/backups/task.js
Normal file
@@ -0,0 +1,174 @@
|
||||
const Zone = require('node-zone')
|
||||
|
||||
const { SyncThenable } = require('./_syncThenable')
|
||||
|
||||
// Sentinel `_log` installed once a task has ended: any further logging
// attempt on that task fails loudly.
const logAfterEnd = function logAfterEnd() {
  throw new Error('task has already ended')
}
|
||||
|
||||
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
const serializeError = error => {
  if (!(error instanceof Error)) {
    return error
  }
  return {
    ...error, // Copy enumerable properties.
    code: error.code,
    message: error.message,
    name: error.name,
    stack: error.stack,
  }
}
|
||||
// Exported for reuse wherever errors must be embedded in JSON logs.
exports.serializeError = serializeError
|
||||
|
||||
// Emits structured task events (start/info/warning/end) through a log
// function. A logger instance represents a single task: it must be started
// exactly once via `run` and logging after the end event throws
// (see `logAfterEnd`). Uses `SyncThenable` so that fully synchronous task
// bodies are logged synchronously.
class TaskLogger {
  constructor(logFn, parentId) {
    this._log = logFn
    this._parentId = parentId
    this._taskId = undefined
  }

  // Id of this task; throws until `run` has generated one.
  get taskId() {
    const taskId = this._taskId
    if (taskId === undefined) {
      throw new Error('start the task first')
    }
    return taskId
  }

  // create a subtask
  fork() {
    return new TaskLogger(this._log, this.taskId)
  }

  // Logs an informational event attached to this (started) task.
  info(message, data) {
    return this._log({
      data,
      event: 'info',
      message,
      taskId: this.taskId,
      timestamp: Date.now(),
    })
  }

  // Runs `fn` as the body of this task: logs a 'start' event, then an 'end'
  // event with status 'success' (resolving to `fn`'s result) or 'failure'
  // (rethrowing the serialized-and-logged error). `data` is optional.
  run(message, data, fn) {
    // Support the two-argument form `run(message, fn)`.
    if (arguments.length === 2) {
      fn = data
      data = undefined
    }

    return SyncThenable.tryUnwrap(
      SyncThenable.fromFunction(() => {
        if (this._taskId !== undefined) {
          throw new Error('task has already started')
        }

        // Random base-36 id; uniqueness is only probabilistic.
        this._taskId = Math.random().toString(36).slice(2)

        return this._log({
          data,
          event: 'start',
          message,
          parentId: this._parentId,
          taskId: this.taskId,
          timestamp: Date.now(),
        })
      })
        .then(fn)
        .then(
          result => {
            // Disable further logging before emitting the final event.
            const log = this._log
            this._log = logAfterEnd
            return SyncThenable.resolve(
              log({
                event: 'end',
                result,
                status: 'success',
                taskId: this.taskId,
                timestamp: Date.now(),
              })
            ).then(() => result)
          },
          error => {
            const log = this._log
            this._log = logAfterEnd
            return SyncThenable.resolve(
              log({
                event: 'end',
                result: serializeError(error),
                status: 'failure',
                taskId: this.taskId,
                timestamp: Date.now(),
              })
            ).then(() => {
              // Preserve the original rejection for the caller.
              throw error
            })
          }
        )
    )
  }

  // Logs a warning event attached to this (started) task.
  warning(message, data) {
    return this._log({
      data,
      event: 'warning',
      message,
      taskId: this.taskId,
      timestamp: Date.now(),
    })
  }

  // Wraps `fn` so each call runs inside this logger's `run`. `message` and
  // `data` may be values or functions evaluated with the call's `this` and
  // arguments.
  wrapFn(fn, message, data) {
    const logger = this
    return function () {
      const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)

      return logger.run(evaluate(message), evaluate(data), () => fn.apply(this, arguments))
    }
  }
}
|
||||
|
||||
// Zone-local key under which the current `TaskLogger` is stored.
const $$task = Symbol('current task logger')

// Returns the `TaskLogger` attached to the current zone, or undefined when
// called outside any task.
const getCurrent = () => Zone.current.data[$$task]
|
||||
|
||||
// Static facade over `TaskLogger`: each method operates on the task attached
// to the current zone (node-zone), so callees can log against the enclosing
// task without threading a logger through every call.
const Task = {
  // Logs an info event on the current task; no-op outside a task.
  info(message, data) {
    const task = getCurrent()
    if (task !== undefined) {
      return task.info(message, data)
    }
  },

  // Runs `fn` as a task. With an explicit `onLog` a new root task is created;
  // otherwise a subtask of the current task is created, and when there is no
  // current task `fn` is simply executed untracked.
  run({ name, data, onLog }, fn) {
    let parentId
    if (onLog === undefined) {
      const parent = getCurrent()
      if (parent === undefined) {
        return fn()
      }
      // Reuse the parent's log sink and chain the ids.
      onLog = parent._log
      parentId = parent.taskId
    }

    const task = new TaskLogger(onLog, parentId)
    // Fork a zone carrying the new task so nested Task.* calls find it,
    // including across async boundaries wrapped by the zone.
    const zone = Zone.current.fork('task')
    zone.data[$$task] = task
    return task.run(name, data, zone.wrap(fn))
  },

  // Logs a warning event on the current task; no-op outside a task.
  warning(message, data) {
    const task = getCurrent()
    if (task !== undefined) {
      return task.warning(message, data)
    }
  },

  // Wraps `fn` so each call runs inside `Task.run`. `name` and `data` may be
  // values or functions evaluated with the call's `this` and arguments.
  wrapFn({ name, data, onLog }, fn) {
    return function () {
      const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
      return Task.run({ name: evaluate(name), data: evaluate(data), onLog }, () => fn.apply(this, arguments))
    }
  },
}
exports.Task = Task
|
||||
1
@xen-orchestra/proxy-cli/.babelrc.js
Normal file
1
@xen-orchestra/proxy-cli/.babelrc.js
Normal file
@@ -0,0 +1 @@
|
||||
// Load the shared monorepo Babel configuration, parameterized by this
// package's own package.json.
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
24
@xen-orchestra/proxy-cli/.npmignore
Normal file
24
@xen-orchestra/proxy-cli/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
46
@xen-orchestra/proxy-cli/README.md
Normal file
46
@xen-orchestra/proxy-cli/README.md
Normal file
@@ -0,0 +1,46 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @xen-orchestra/proxy-cli
|
||||
|
||||
[](https://npmjs.org/package/@xen-orchestra/proxy-cli)  [](https://bundlephobia.com/result?p=@xen-orchestra/proxy-cli) [](https://npmjs.org/package/@xen-orchestra/proxy-cli)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/proxy-cli):
|
||||
|
||||
```
|
||||
> npm install --global @xen-orchestra/proxy-cli
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
$ xo-proxy-cli --help
|
||||
|
||||
Usage:
|
||||
|
||||
xo-proxy-cli <method> [<param>=<value>]...
|
||||
Call a method of the API and display its result.
|
||||
|
||||
xo-proxy-cli [--file | -f] <file>
|
||||
Read a CSON or JSON file containing an object with `method` and `params`
|
||||
properties and call the API method.
|
||||
|
||||
The file can also contain an array containing multiple calls, which will be
|
||||
run in sequence.
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
|
||||
15
@xen-orchestra/proxy-cli/USAGE.md
Normal file
15
@xen-orchestra/proxy-cli/USAGE.md
Normal file
@@ -0,0 +1,15 @@
|
||||
```
|
||||
$ xo-proxy-cli --help
|
||||
|
||||
Usage:
|
||||
|
||||
xo-proxy-cli <method> [<param>=<value>]...
|
||||
Call a method of the API and display its result.
|
||||
|
||||
xo-proxy-cli [--file | -f] <file>
|
||||
Read a CSON or JSON file containing an object with `method` and `params`
|
||||
properties and call the API method.
|
||||
|
||||
The file can also contain an array containing multiple calls, which will be
|
||||
run in sequence.
|
||||
```
|
||||
68
@xen-orchestra/proxy-cli/package.json
Normal file
68
@xen-orchestra/proxy-cli/package.json
Normal file
@@ -0,0 +1,68 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/proxy-cli",
|
||||
"version": "0.2.0",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
"backup",
|
||||
"proxy",
|
||||
"xen-orchestra",
|
||||
"xo"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/proxy-cli",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/proxy-cli",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"preferGlobal": true,
|
||||
"main": "dist/",
|
||||
"bin": {
|
||||
"xo-proxy-cli": "dist/index.js"
|
||||
},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.0",
|
||||
"@vates/read-chunk": "^0.1.1",
|
||||
"app-conf": "^0.7.0",
|
||||
"content-type": "^1.0.4",
|
||||
"cson-parser": "^4.0.7",
|
||||
"getopts": "^2.2.3",
|
||||
"http-request-plus": "^0.9.1",
|
||||
"json-rpc-protocol": "^0.13.1",
|
||||
"promise-toolbox": "^0.15.1",
|
||||
"pump": "^3.0.0",
|
||||
"pumpify": "^2.0.1",
|
||||
"split2": "^3.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.7.4",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"postversion": "npm publish --access public",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
}
|
||||
}
|
||||
177
@xen-orchestra/proxy-cli/src/index.js
Executable file
177
@xen-orchestra/proxy-cli/src/index.js
Executable file
@@ -0,0 +1,177 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import assert from 'assert'
|
||||
import contentType from 'content-type'
|
||||
import CSON from 'cson-parser'
|
||||
import fromCallback from 'promise-toolbox/fromCallback'
|
||||
import fs from 'fs'
|
||||
import getopts from 'getopts'
|
||||
import hrp from 'http-request-plus'
|
||||
import pump from 'pump'
|
||||
import split2 from 'split2'
|
||||
import pumpify from 'pumpify'
|
||||
import { extname, join } from 'path'
|
||||
import { format, parse } from 'json-rpc-protocol'
|
||||
import { inspect } from 'util'
|
||||
import { load as loadConfig } from 'app-conf'
|
||||
import { readChunk } from '@vates/read-chunk'
|
||||
|
||||
import pkg from '../package.json'
|
||||
|
||||
const FORMATS = {
|
||||
__proto__: null,
|
||||
|
||||
cson: CSON.parse,
|
||||
json: JSON.parse,
|
||||
}
|
||||
|
||||
const parseValue = value => (value.startsWith('json:') ? JSON.parse(value.slice(5)) : value)
|
||||
|
||||
async function main(argv) {
|
||||
const config = await loadConfig('xo-proxy', {
|
||||
appDir: join(__dirname, '..'),
|
||||
ignoreUnknownFormats: true,
|
||||
})
|
||||
|
||||
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
|
||||
|
||||
const { _: args, file, help, host, raw, token } = getopts(argv, {
|
||||
alias: { file: 'f', help: 'h' },
|
||||
boolean: ['help', 'raw'],
|
||||
default: {
|
||||
token: config.authenticationToken,
|
||||
},
|
||||
stopEarly: true,
|
||||
string: ['file', 'host', 'token'],
|
||||
})
|
||||
|
||||
if (help || (file === '' && args.length === 0)) {
|
||||
return console.log(
|
||||
'%s',
|
||||
`Usage:
|
||||
|
||||
xo-proxy-cli <method> [<param>=<value>]...
|
||||
Call a method of the API and display its result.
|
||||
|
||||
xo-proxy-cli [--file | -f] <file>
|
||||
Read a CSON or JSON file containing an object with \`method\` and \`params\`
|
||||
properties and call the API method.
|
||||
|
||||
The file can also contain an array containing multiple calls, which will be
|
||||
run in sequence.
|
||||
|
||||
${pkg.name} v${pkg.version}`
|
||||
)
|
||||
}
|
||||
|
||||
const call = async (method, params) => {
|
||||
const request = {
|
||||
body: format.request(0, method, params),
|
||||
headers: {
|
||||
'content-type': 'application/json',
|
||||
cookie: `authenticationToken=${token}`,
|
||||
},
|
||||
pathname: '/api/v1',
|
||||
protocol: 'https:',
|
||||
rejectUnauthorized: false,
|
||||
}
|
||||
if (host !== '') {
|
||||
request.host = host
|
||||
} else {
|
||||
request.hostname = hostname
|
||||
request.port = port
|
||||
}
|
||||
|
||||
const response = await hrp.post(request)
|
||||
|
||||
const { stdout } = process
|
||||
|
||||
const responseType = contentType.parse(response).type
|
||||
if (responseType === 'application/octet-stream') {
|
||||
if (stdout.isTTY) {
|
||||
throw new Error('binary data, pipe to a file!')
|
||||
}
|
||||
await fromCallback(pump, response, stdout)
|
||||
return
|
||||
}
|
||||
|
||||
assert.strictEqual(responseType, 'application/json')
|
||||
const lines = pumpify.obj(response, split2())
|
||||
|
||||
const firstLine = await readChunk(lines)
|
||||
|
||||
try {
|
||||
const result = await parse.result(firstLine)
|
||||
if (
|
||||
result !== null &&
|
||||
typeof result === 'object' &&
|
||||
Object.keys(result).length === 1 &&
|
||||
result.$responseType === 'ndjson'
|
||||
) {
|
||||
let line
|
||||
while ((line = await readChunk(lines)) !== null) {
|
||||
stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
|
||||
stdout.write('\n')
|
||||
}
|
||||
} else if (raw && typeof result === 'string') {
|
||||
stdout.write(result)
|
||||
} else {
|
||||
stdout.write(inspect(result, { colors: true, depth: null }))
|
||||
stdout.write('\n')
|
||||
}
|
||||
} catch (error) {
|
||||
if (!(error?.code === 10 && 'errors' in error.data)) {
|
||||
throw error
|
||||
}
|
||||
|
||||
// we should be able to do better but the messages returned by ajv are not
|
||||
// precise enough
|
||||
//
|
||||
// see https://github.com/epoberezkin/ajv/issues/1099
|
||||
throw error.data.errors
|
||||
}
|
||||
}
|
||||
|
||||
if (file !== '') {
|
||||
let data = fs.readFileSync(file, 'utf8')
|
||||
const ext = extname(file).slice(1).toLowerCase()
|
||||
const parse = FORMATS[ext]
|
||||
if (parse === undefined) {
|
||||
throw new Error(`unsupported file: ${file}`)
|
||||
}
|
||||
data = parse(data)
|
||||
if (!Array.isArray(data)) {
|
||||
data = [data]
|
||||
}
|
||||
|
||||
for (let i = 0, n = data.length; i < n; ++i) {
|
||||
process.stderr.write(`\n${i}-th call...\n`)
|
||||
|
||||
const { method, params } = data[i]
|
||||
await call(method, params)
|
||||
}
|
||||
} else {
|
||||
const method = args[0]
|
||||
const params = {}
|
||||
for (let i = 1, n = args.length; i < n; ++i) {
|
||||
const param = args[i]
|
||||
const j = param.indexOf('=')
|
||||
if (j === -1) {
|
||||
throw new Error(`invalid param format: ${param}`)
|
||||
}
|
||||
params[param.slice(0, j)] = parseValue(param.slice(j + 1))
|
||||
}
|
||||
|
||||
await call(method, params)
|
||||
}
|
||||
}
|
||||
main(process.argv.slice(2)).then(
|
||||
() => {
|
||||
process.exit(0)
|
||||
},
|
||||
error => {
|
||||
console.error('exception in main:', error)
|
||||
|
||||
process.exit(1)
|
||||
}
|
||||
)
|
||||
1
@xen-orchestra/proxy/.babelrc.js
Normal file
1
@xen-orchestra/proxy/.babelrc.js
Normal file
@@ -0,0 +1 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
24
@xen-orchestra/proxy/.npmignore
Normal file
24
@xen-orchestra/proxy/.npmignore
Normal file
@@ -0,0 +1,24 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
18
@xen-orchestra/proxy/README.md
Normal file
18
@xen-orchestra/proxy/README.md
Normal file
@@ -0,0 +1,18 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @xen-orchestra/proxy
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
|
||||
0
@xen-orchestra/proxy/USAGE.md
Normal file
0
@xen-orchestra/proxy/USAGE.md
Normal file
80
@xen-orchestra/proxy/config.toml
Normal file
80
@xen-orchestra/proxy/config.toml
Normal file
@@ -0,0 +1,80 @@
|
||||
# Vendor config: DO NOT TOUCH!
|
||||
#
|
||||
# See sample.config.toml to override.
|
||||
|
||||
# This secret string is used to authenticate clients to the API.
|
||||
#
|
||||
# It must be defined to a non-empty string for the proxy to run.
|
||||
authenticationToken = ''
|
||||
|
||||
datadir = '/var/lib/xo-proxy/data'
|
||||
resourceDebounce = '5m'
|
||||
|
||||
[api]
|
||||
keepAliveInterval = 10e3
|
||||
|
||||
[backups]
|
||||
# Mode to use for newly created backup directories
|
||||
#
|
||||
# https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation
|
||||
dirMode = 0o700
|
||||
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
|
||||
|
||||
[backups.defaultSettings]
|
||||
reportWhen = 'failure'
|
||||
|
||||
[backups.metadata.defaultSettings]
|
||||
retentionPoolMetadata = 0
|
||||
retentionXoMetadata = 0
|
||||
|
||||
[backups.vm.defaultSettings]
|
||||
bypassVdiChainsCheck = false
|
||||
checkpointSnapshot = false
|
||||
concurrency = 2
|
||||
deleteFirst = false
|
||||
exportRetention = 0
|
||||
fullInterval = 0
|
||||
offlineBackup = false
|
||||
offlineSnapshot = false
|
||||
snapshotRetention = 0
|
||||
timeout = 0
|
||||
vmTimeout = 0
|
||||
|
||||
# This is a work-around.
|
||||
#
|
||||
# See https://github.com/vatesfr/xen-orchestra/pull/4674
|
||||
maxMergedDeltasPerRun = 2
|
||||
|
||||
# Each `http.listen.<name>` entry defines a specific listening configuration for
|
||||
# the HTTP server.
|
||||
#
|
||||
# `<name>` can be freely choosen.
|
||||
[http.listen.https]
|
||||
|
||||
# Generate self-signed certificate if missing
|
||||
autoCert = true
|
||||
|
||||
cert = '/var/lib/xo-proxy/certificate.pem'
|
||||
key = '/var/lib/xo-proxy/key.pem'
|
||||
port = 443
|
||||
|
||||
[remoteOptions]
|
||||
mountsDir = '/run/xo-proxy/mounts'
|
||||
|
||||
# timeout in milliseconds (set to 0 to disable)
|
||||
timeout = 600e3
|
||||
|
||||
# see https:#github.com/vatesfr/xen-orchestra/issues/3419
|
||||
# useSudo = false
|
||||
|
||||
[remotes]
|
||||
disableFileRemotes = true
|
||||
|
||||
[xapiOptions]
|
||||
# VDIs with `[NOBAK]` flag can be ignored while snapshotting an halted VM.
|
||||
#
|
||||
# This is disabled by default for the time being but will be turned on after enough testing.
|
||||
ignoreNobakVdis = false
|
||||
|
||||
maxUncoalescedVdis = 1
|
||||
watchEvents = ['network', 'PIF', 'pool', 'SR', 'task', 'VBD', 'VDI', 'VIF', 'VM']
|
||||
191
@xen-orchestra/proxy/docs/api.md
Normal file
191
@xen-orchestra/proxy/docs/api.md
Normal file
@@ -0,0 +1,191 @@
|
||||
## Transport
|
||||
|
||||
The API is based on line-delimited [JSON-RPC
|
||||
2.0](https://www.jsonrpc.org/specification) over HTTP.
|
||||
|
||||
### Authentication
|
||||
|
||||
A valid authentication token should be attached as a cookie to all HTTP
|
||||
requests:
|
||||
|
||||
```http
|
||||
POST /api/v1 HTTP/1.1
|
||||
Cookie: authenticationToken=TN2YBOMYtXB_hHtf4wTzm9p5tTuqq2i15yeuhcz2xXM
|
||||
```
|
||||
|
||||
The server will respond to an invalid token with a `401 Unauthorized` status.
|
||||
|
||||
The server can request the client to update its token with a `Set-Cookie` header:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
Set-Cookie: authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs
|
||||
```
|
||||
|
||||
### Remote Procedure Call
|
||||
|
||||
#### Request
|
||||
|
||||
A call is a JSON-RPC request over a POST HTTP request:
|
||||
|
||||
```http
|
||||
POST /api/v1 HTTP/1.1
|
||||
Host: proxy1.xo.company.tld
|
||||
Content-Type: application/json
|
||||
Content-Length: 69
|
||||
|
||||
```
|
||||
|
||||
#### Response
|
||||
|
||||
##### JSON-RPC response
|
||||
|
||||
Plain JSON-RPC response
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
NDJSON Response
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/json
|
||||
|
||||
```
|
||||
|
||||
##### Binary response
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
Content-Type: application/octet-stream
|
||||
|
||||
```
|
||||
|
||||
## Methods
|
||||
|
||||
```ts
|
||||
interface Remote {
|
||||
url: string
|
||||
options?: string
|
||||
}
|
||||
|
||||
declare namespace system {
|
||||
function listMethods(): string[]
|
||||
function methodSignature(_: { name: string }): { params: { [string]: object } }
|
||||
}
|
||||
|
||||
declare namespace event {
|
||||
interface Event {
|
||||
class: 'Task'
|
||||
operation: 'add' | 'mod' | 'del'
|
||||
snapshot: Task
|
||||
}
|
||||
|
||||
function from(_: {
|
||||
token: string = ''
|
||||
timeout?: number
|
||||
}): {
|
||||
events: Event[]
|
||||
token: string
|
||||
}
|
||||
}
|
||||
|
||||
declare namespace backup {
|
||||
type SimpleIdPattern = { id: string | { __or: string[] } }
|
||||
|
||||
interface BackupJob {
|
||||
id: string
|
||||
type: 'backup'
|
||||
compression?: 'native' | 'zstd' | ''
|
||||
mode: Mode
|
||||
name: string
|
||||
remotes?: SimpleIdPattern
|
||||
settings: $Dict<Settings>
|
||||
srs?: SimpleIdPattern
|
||||
type: 'backup'
|
||||
vms: Pattern
|
||||
}
|
||||
interface MetadataBackupJob {
|
||||
id: string
|
||||
name: string
|
||||
pools?: SimpleIdPattern
|
||||
remotes: SimpleIdPattern
|
||||
settings: Settings
|
||||
type: 'metadataBackup'
|
||||
xoMetadata?: object
|
||||
}
|
||||
|
||||
interface Schedule {
|
||||
id: string
|
||||
}
|
||||
|
||||
interface Xapi {
|
||||
allowUnauthorized: boolean
|
||||
credentials: object
|
||||
url: string
|
||||
}
|
||||
|
||||
function importVmBackup(_: {
|
||||
backupId: string
|
||||
remote: Remote
|
||||
srUuid: string
|
||||
xapi: Xapi
|
||||
streamLogs: boolean = false
|
||||
}): string
|
||||
|
||||
function listPoolMetadataBackups(_: {
|
||||
remotes: { [id: string]: Remote }
|
||||
}): { [remoteId: string]: { [poolUuid: string]: object[] } }
|
||||
|
||||
function listVmBackups(_: {
|
||||
remotes: { [remoteId: string]: Remote }
|
||||
}): { [remoteId: string]: { [vmUuid: string]: object[] } }
|
||||
|
||||
function listXoMetadataBackups(_: { remotes: { [id: string]: Remote } }): { [remoteId: string]: object[] }
|
||||
|
||||
function run(_: {
|
||||
job: BackupJob | MetadataBackupJob
|
||||
remotes: { [id: string]: Remote }
|
||||
schedule: Schedule
|
||||
xapis?: { [id: string]: Xapi }
|
||||
recordToXapi?: { [recordUuid: string]: string }
|
||||
streamLogs: boolean = false
|
||||
}): string
|
||||
|
||||
function restoreMetadataBackup(_: { backupId: string; remote: Remote; xapi: Xapi }): ReadableStream
|
||||
}
|
||||
|
||||
declare namespace task {
|
||||
type Status = 'canceled' | 'failure' | 'interrupted' | 'pending' | 'skipped' | 'success'
|
||||
|
||||
interface Task {
|
||||
data: any
|
||||
end?: number
|
||||
id: string
|
||||
start: number
|
||||
status: Status
|
||||
tasks?: Task[]
|
||||
}
|
||||
|
||||
function cancel(_: { taskId: string })
|
||||
function destroy(_: { taskId: string })
|
||||
function get(_: { taskId: string }): string
|
||||
function getAll(): Task[]
|
||||
}
|
||||
|
||||
declare namespace remote {
|
||||
function test(Remote): object
|
||||
}
|
||||
```
|
||||
function destroy(_: { taskId: string })
|
||||
function get(_: { taskId: string }): string
|
||||
function getAll(): Task[]
|
||||
}
|
||||
|
||||
declare namespace remote {
|
||||
function test(Remote): object
|
||||
}
|
||||
```
|
||||
98
@xen-orchestra/proxy/package.json
Normal file
98
@xen-orchestra/proxy/package.json
Normal file
@@ -0,0 +1,98 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "@xen-orchestra/proxy",
|
||||
"version": "0.10.0",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
"backup",
|
||||
"proxy",
|
||||
"xen-orchestra",
|
||||
"xo"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/proxy",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/proxy",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"preferGlobal": true,
|
||||
"main": "dist/",
|
||||
"bin": {
|
||||
"xo-proxy": "dist/index.js"
|
||||
},
|
||||
"files": [
|
||||
"config.toml",
|
||||
"dist/",
|
||||
"scripts/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.0",
|
||||
"@vates/compose": "^2.0.0",
|
||||
"@vates/decorate-with": "^0.0.1",
|
||||
"@vates/disposable": "^0.0.0",
|
||||
"@vates/parse-duration": "^0.1.0",
|
||||
"@xen-orchestra/backups": "^0.4.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
"@xen-orchestra/fs": "0.12.0",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/self-signed": "^0.1.0",
|
||||
"@xen-orchestra/xapi": "^0.4.1",
|
||||
"ajv": "^6.10.0",
|
||||
"app-conf": "^0.9.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"compare-versions": "^3.4.0",
|
||||
"fs-extra": "^8.1.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"getopts": "^2.2.3",
|
||||
"golike-defer": "^0.4.1",
|
||||
"http-server-plus": "^0.11.0",
|
||||
"json-rpc-protocol": "^0.13.1",
|
||||
"jsonrpc-websocket-client": "^0.5.0",
|
||||
"koa": "^2.5.1",
|
||||
"koa-compress": "^3.0.0",
|
||||
"koa-helmet": "^5.1.0",
|
||||
"koa-router": "^7.4.0",
|
||||
"lodash": "^4.17.10",
|
||||
"ms": "^2.1.2",
|
||||
"node-zone": "^0.4.0",
|
||||
"parse-pairs": "^1.0.0",
|
||||
"promise-toolbox": "^0.16.0",
|
||||
"source-map-support": "^0.5.16",
|
||||
"stoppable": "^1.0.6",
|
||||
"xdg-basedir": "^4.0.0",
|
||||
"xo-common": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-class-properties": "^7.1.0",
|
||||
"@babel/plugin-proposal-decorators": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.7.4",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-transform-dev": "^2.0.1",
|
||||
"cross-env": "^7.0.2",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"postinstall": "./scripts/systemd-service-installer",
|
||||
"prebuild": "yarn run clean && index-modules --cjs-lazy src/app/mixins",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"preuninstall": "./scripts/systemd-service-installer",
|
||||
"start": "./dist/index.js"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
}
|
||||
}
|
||||
34
@xen-orchestra/proxy/scripts/systemd-service-installer
Executable file
34
@xen-orchestra/proxy/scripts/systemd-service-installer
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env sh
|
||||
|
||||
set -eu
|
||||
|
||||
if [ "$(id -u)" -ne 0 ]
|
||||
then
|
||||
exit
|
||||
fi
|
||||
|
||||
# $npm_package_name is not good enough here because it's a scoped package
|
||||
NAME=xo-proxy
|
||||
|
||||
SERVICE_FILE=$(pwd)/$NAME.service
|
||||
|
||||
if [ "$npm_lifecycle_event" = postinstall ]
|
||||
then
|
||||
printf %s "[Unit]
|
||||
Description=$NAME
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
ExecStart=$npm_config_prefix/bin/$NAME
|
||||
Restart=always
|
||||
SyslogIdentifier=$NAME
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
" > "$SERVICE_FILE"
|
||||
systemctl enable --now "$SERVICE_FILE"
|
||||
elif [ "$npm_lifecycle_event" = "preuninstall" ]
|
||||
then
|
||||
systemctl disable --now "$SERVICE_FILE"
|
||||
rm -f "$SERVICE_FILE"
|
||||
fi
|
||||
1
@xen-orchestra/proxy/src/app/_Profile.js
Normal file
1
@xen-orchestra/proxy/src/app/_Profile.js
Normal file
@@ -0,0 +1 @@
|
||||
export class Profile {}
|
||||
52
@xen-orchestra/proxy/src/app/index.js
Normal file
52
@xen-orchestra/proxy/src/app/index.js
Normal file
@@ -0,0 +1,52 @@
|
||||
import camelCase from 'lodash/camelCase'
|
||||
import { createDebounceResource } from '@vates/disposable/debounceResource'
|
||||
|
||||
import mixins from './mixins'
|
||||
|
||||
const { defineProperties, defineProperty, keys } = Object
|
||||
const noop = Function.prototype
|
||||
|
||||
const MIXIN_CYCLIC_DESCRIPTOR = {
|
||||
configurable: true,
|
||||
get() {
|
||||
throw new Error('cyclic dependency')
|
||||
},
|
||||
}
|
||||
|
||||
export default class App {
|
||||
constructor(opts) {
|
||||
// add lazy property for each of the mixin, this allows mixins to depend on
|
||||
// one another without any special ordering
|
||||
const descriptors = {}
|
||||
keys(mixins).forEach(name => {
|
||||
const Mixin = mixins[name]
|
||||
name = camelCase(name)
|
||||
|
||||
descriptors[name] = {
|
||||
configurable: true,
|
||||
get: () => {
|
||||
defineProperty(this, name, MIXIN_CYCLIC_DESCRIPTOR)
|
||||
const instance = new Mixin(this, opts)
|
||||
defineProperty(this, name, {
|
||||
value: instance,
|
||||
})
|
||||
return instance
|
||||
},
|
||||
}
|
||||
})
|
||||
defineProperties(this, descriptors)
|
||||
|
||||
// access all mixin properties to trigger their creation
|
||||
keys(descriptors).forEach(name => {
|
||||
noop(this[name])
|
||||
})
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
this.config.watchDuration('resourceDebounce', delay => {
|
||||
debounceResource.defaultDelay = delay
|
||||
})
|
||||
this.hooks.once('stop', debounceResource.flushAll)
|
||||
|
||||
this.debounceResource = debounceResource
|
||||
}
|
||||
}
|
||||
266
@xen-orchestra/proxy/src/app/mixins/api.js
Normal file
266
@xen-orchestra/proxy/src/app/mixins/api.js
Normal file
@@ -0,0 +1,266 @@
|
||||
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
|
||||
import * as errors from 'xo-common/api-errors'
|
||||
import Ajv from 'ajv'
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import compress from 'koa-compress'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import getStream from 'get-stream'
|
||||
import helmet from 'koa-helmet'
|
||||
import Koa from 'koa'
|
||||
import once from 'lodash/once'
|
||||
import Router from 'koa-router'
|
||||
import Zone from 'node-zone'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import { version as serverVersion } from '../../../package.json'
|
||||
|
||||
const { debug, warn } = createLogger('xo:proxy:api')
|
||||
|
||||
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
|
||||
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
|
||||
for await (const data of iterable) {
|
||||
yield JSON.stringify(data) + '\n'
|
||||
}
|
||||
})
|
||||
|
||||
export default class Api {
|
||||
constructor(app, { httpServer }) {
|
||||
this._ajv = new Ajv({ allErrors: true })
|
||||
this._methods = { __proto__: null }
|
||||
|
||||
const router = new Router({ prefix: '/api/v1' }).post('/', async ctx => {
|
||||
// Before Node 13.0 there was an inactivity timeout of 2 mins, which may
|
||||
// not be enough for the API.
|
||||
ctx.req.setTimeout(0)
|
||||
|
||||
const profile = await app.authentication.findProfile({
|
||||
authenticationToken: ctx.cookies.get('authenticationToken'),
|
||||
})
|
||||
if (profile === undefined) {
|
||||
ctx.status = 401
|
||||
return
|
||||
}
|
||||
|
||||
let body = await getStream(ctx.req)
|
||||
try {
|
||||
body = parse(body)
|
||||
} catch (error) {
|
||||
ctx.body = format.error(null, error)
|
||||
return
|
||||
}
|
||||
|
||||
const zone = Zone.current.fork('POST /api')
|
||||
zone.data.profile = profile
|
||||
|
||||
let result
|
||||
try {
|
||||
result = await zone.run(() => this._call(body.method, body.params))
|
||||
} catch (error) {
|
||||
const { method, params } = body
|
||||
warn('call error', { method, params, error })
|
||||
ctx.set('Content-Type', 'application/json')
|
||||
ctx.body = format.error(body.id, error)
|
||||
return
|
||||
}
|
||||
|
||||
if (typeof result?.pipe === 'function' && !result._readableState?.objectMode) {
|
||||
ctx.body = result
|
||||
return
|
||||
}
|
||||
|
||||
ctx.set('Content-Type', 'application/json')
|
||||
|
||||
const isAsyncIterable =
|
||||
result !== null &&
|
||||
typeof result === 'object' &&
|
||||
(typeof result[Symbol.iterator] === 'function' || typeof result[Symbol.asyncIterator] === 'function')
|
||||
if (isAsyncIterable) {
|
||||
const stream = ndJsonStream(body.id, result)
|
||||
ctx.body = stream
|
||||
|
||||
const keepAliveInterval = app.config.get('api.keepAliveInterval')
|
||||
if (keepAliveInterval !== 0) {
|
||||
// In the wild, long term HTTP requests with period of inactivity often
|
||||
// breaks, send some data every 10s to keep it opened.
|
||||
const stopTimer = clearInterval.bind(
|
||||
undefined,
|
||||
setInterval(() => stream.push(' '), keepAliveInterval)
|
||||
)
|
||||
stream.on('end', stopTimer).on('error', stopTimer)
|
||||
}
|
||||
} else {
|
||||
ctx.body = format.response(body.id, result !== undefined ? result : true)
|
||||
}
|
||||
})
|
||||
|
||||
const koa = new Koa()
|
||||
.on('error', warn)
|
||||
.use(helmet())
|
||||
.use(compress())
|
||||
.use(router.routes())
|
||||
.use(router.allowedMethods())
|
||||
|
||||
httpServer.on('request', koa.callback())
|
||||
|
||||
this.addMethods({
|
||||
system: {
|
||||
getMethodsInfo: [
|
||||
function* () {
|
||||
const methods = this._methods
|
||||
for (const name in methods) {
|
||||
const { description, params = {} } = methods[name]
|
||||
yield { description, name, params }
|
||||
}
|
||||
}.bind(this),
|
||||
{
|
||||
description: 'returns the signatures of all available API methods',
|
||||
},
|
||||
],
|
||||
getServerVersion: [
|
||||
() => serverVersion,
|
||||
{
|
||||
description: 'returns the version of xo-server',
|
||||
},
|
||||
],
|
||||
listMethods: [
|
||||
function* () {
|
||||
const methods = this._methods
|
||||
for (const name in methods) {
|
||||
yield name
|
||||
}
|
||||
}.bind(this),
|
||||
{
|
||||
description: 'returns the name of all available API methods',
|
||||
},
|
||||
],
|
||||
methodSignature: [
|
||||
({ method: name }) => {
|
||||
const method = this._methods[name]
|
||||
if (method === undefined) {
|
||||
throw errors.noSuchObject('method', name)
|
||||
}
|
||||
|
||||
const { description, params = {} } = method
|
||||
return { description, name, params }
|
||||
},
|
||||
{
|
||||
description: 'returns the signature of an API method',
|
||||
params: {
|
||||
method: { type: 'string' },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
test: {
|
||||
range: [
|
||||
function* ({ start = 0, stop, step }) {
|
||||
if (step === undefined) {
|
||||
step = start > stop ? -1 : 1
|
||||
}
|
||||
if (step > 0) {
|
||||
for (; start < stop; start += step) {
|
||||
yield start
|
||||
}
|
||||
} else {
|
||||
for (; start > stop; start += step) {
|
||||
yield start
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
params: {
|
||||
start: { optional: true, type: 'number' },
|
||||
step: { optional: true, type: 'number' },
|
||||
stop: { type: 'number' },
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
addMethod(name, method, { description, params = {} } = {}) {
|
||||
const methods = this._methods
|
||||
|
||||
if (name in methods) {
|
||||
throw new Error(`API method ${name} already exists`)
|
||||
}
|
||||
|
||||
const ajv = this._ajv
|
||||
const validate = ajv.compile({
|
||||
// we want additional properties to be disabled by default
|
||||
additionalProperties: params['*'] || false,
|
||||
|
||||
properties: params,
|
||||
|
||||
// we want params to be required by default unless explicitly marked so
|
||||
// we use property `optional` instead of object `required`
|
||||
required: Object.keys(params).filter(name => {
|
||||
const param = params[name]
|
||||
const required = !param.optional
|
||||
delete param.optional
|
||||
return required
|
||||
}),
|
||||
|
||||
type: 'object',
|
||||
})
|
||||
|
||||
const m = params => {
|
||||
if (!validate(params)) {
|
||||
throw errors.invalidParameters(validate.errors)
|
||||
}
|
||||
return method(params)
|
||||
}
|
||||
m.description = description
|
||||
m.params = params
|
||||
|
||||
methods[name] = m
|
||||
|
||||
return once(() => {
|
||||
delete methods[name]
|
||||
})
|
||||
}
|
||||
|
||||
addMethods(methods) {
|
||||
let base = ''
|
||||
const removes = []
|
||||
|
||||
const addMethod = (method, name) => {
|
||||
name = base + name
|
||||
|
||||
if (typeof method === 'function') {
|
||||
removes.push(this.addMethod(name, method))
|
||||
return
|
||||
} else if (Array.isArray(method)) {
|
||||
removes.push(this.addMethod(name, ...method))
|
||||
return
|
||||
}
|
||||
|
||||
const oldBase = base
|
||||
base = name + '.'
|
||||
forOwn(method, addMethod)
|
||||
base = oldBase
|
||||
}
|
||||
|
||||
try {
|
||||
forOwn(methods, addMethod)
|
||||
} catch (error) {
|
||||
// Remove all added methods.
|
||||
forOwn(removes, remove => remove())
|
||||
|
||||
// Forward the error
|
||||
throw error
|
||||
}
|
||||
|
||||
return once => forOwn(removes, remove => remove())
|
||||
}
|
||||
|
||||
_call(method, params = {}) {
|
||||
debug(`call: ${method}()`, { method, params })
|
||||
const fn = this._methods[method]
|
||||
if (fn === undefined) {
|
||||
throw new MethodNotFound(method)
|
||||
}
|
||||
return fn(params)
|
||||
}
|
||||
}
|
||||
159
@xen-orchestra/proxy/src/app/mixins/appliance.js
Normal file
159
@xen-orchestra/proxy/src/app/mixins/appliance.js
Normal file
@@ -0,0 +1,159 @@
|
||||
import Disposable from 'promise-toolbox/Disposable'
|
||||
import fromCallback from 'promise-toolbox/fromCallback'
|
||||
import fromEvent from 'promise-toolbox/fromEvent'
|
||||
import JsonRpcWebsocketClient from 'jsonrpc-websocket-client'
|
||||
import parsePairs from 'parse-pairs'
|
||||
import using from 'promise-toolbox/using'
|
||||
import { createLogger } from '@xen-orchestra/log/dist'
|
||||
import { deduped } from '@vates/disposable/deduped'
|
||||
import { execFile, spawn } from 'child_process'
|
||||
import { readFile } from 'fs-extra'
|
||||
|
||||
const TUNNEL_SERVICE = 'xoa-support-tunnel.service'
|
||||
|
||||
const { debug, warn } = createLogger('xo:proxy:appliance')
|
||||
|
||||
const getUpdater = deduped(async function () {
|
||||
const updater = new JsonRpcWebsocketClient('ws://localhost:9001')
|
||||
await updater.open()
|
||||
return new Disposable(updater, () => updater.close())
|
||||
})
|
||||
|
||||
// Sends an `update` notification to the updater daemon and resolves with the
// payload of its final `end` message.
//
// The updater replies through notifications:
// - `print`:        progress output, forwarded to the debug log
// - `end`:          operation finished, resolves with its params
// - `server-error`: operation failed, rejects with its message
// - `connected`:    handshake noise, ignored
// - anything else:  logged as an unhandled message
const callUpdate = params =>
  using(
    getUpdater(),
    updater =>
      new Promise((resolve, reject) => {
        updater
          .on('error', reject)
          .on('notification', ({ method, params }) => {
            if (method === 'print') {
              debug('updater.update: ' + params.content)
            } else if (method === 'end') {
              resolve(params)
            } else if (method === 'server-error') {
              reject(new Error(params.message))
            } else if (method !== 'connected') {
              warn('update.update, unhandled message', {
                method,
                params,
              })
            }
          })
          .notify('update', params)
      })
  )
|
||||
|
||||
// Runs `xoa check` and returns its combined stdout + stderr output,
// interleaved in arrival order, as a single string.
//
// The child inherits the current environment except:
// - DEBUG is cleared so xo-server's debug setting does not pollute the output
// - FORCE_COLOR is set so the report keeps its colors even without a TTY
//
// FIX: dropped `all: true` from the spawn options — it is an execa option and
// is silently ignored by child_process.spawn; the interleaving is done
// manually below by subscribing to both stdout and stderr.
async function checkAppliance() {
  const child = spawn('xoa', ['check'], {
    env: {
      ...process.env,

      // dont inherit this var from xo-server or the output will be polluted
      DEBUG: '',

      FORCE_COLOR: '1',
    },
  })

  const chunks = []
  let length = 0
  const onData = chunk => {
    chunks.push(chunk)
    length += chunk.length
  }
  child.stdout.on('data', onData)
  child.stderr.on('data', onData)

  await fromEvent(child, 'exit')

  return Buffer.concat(chunks, length).toString()
}
|
||||
|
||||
// Stops the systemd unit implementing the support tunnel.
async function closeSupportTunnel() {
  await fromCallback(cb => execFile('systemctl', ['stop', TUNNEL_SERVICE], cb))
}
|
||||
|
||||
// Reads /etc/os-release and extracts the appliance identity:
// XOA build number, OS id and OS version.
async function getApplianceInfo() {
  const osRelease = parsePairs(await readFile('/etc/os-release', 'utf8'))
  const { XOA_BUILD: build, ID: os, VERSION_ID: osVersion } = osRelease
  return { build, os, osVersion }
}
|
||||
|
||||
// Reports the state of the support tunnel.
//
// Uses `systemctl is-active` / `is-failed` exit codes (0 = yes) to determine
// whether the unit is running or has failed; the second check is skipped when
// the first already succeeded.
//
// Returns `{ open, stdout }` where `stdout` is the tunnel log file content
// when the unit is active or failed, and an empty string otherwise.
async function getStateSupportTunnel() {
  const isActive =
    (await fromEvent(
      spawn('systemctl', ['is-active', '--quiet', TUNNEL_SERVICE], {
        stdio: 'ignore',
      }),
      'exit'
    )) === 0

  const isActiveOrFailed =
    isActive ||
    (await fromEvent(
      spawn('systemctl', ['is-failed', '--quiet', TUNNEL_SERVICE], {
        stdio: 'ignore',
      }),
      'exit'
    )) === 0

  return {
    open: isActive,
    stdout: isActiveOrFailed ? await fromCallback(readFile, '/tmp/xoa-support-tunnel.out', 'utf8') : '',
  }
}
|
||||
|
||||
// Starts the systemd unit implementing the support tunnel.
async function openSupportTunnel() {
  await fromCallback(cb => execFile('systemctl', ['start', TUNNEL_SERVICE], cb))
}
|
||||
|
||||
// Application mixin exposing appliance management over the proxy API:
// health check, identity info, support tunnel control and updater access.
export default class Appliance {
  constructor(app) {
    app.api.addMethods({
      appliance: {
        check: checkAppliance,
        getInfo: [
          getApplianceInfo,
          {
            description: 'returns various information about the appliance itself',
          },
        ],
        supportTunnel: {
          close: [
            closeSupportTunnel,
            {
              description: 'close the support tunnel',
            },
          ],
          getState: [
            getStateSupportTunnel,
            {
              description: 'getState the support tunnel',
            },
          ],
          open: [
            openSupportTunnel,
            {
              description: 'open the support tunnel',
            },
          ],
        },
        updater: {
          // proxy straight through to the local updater daemon
          getLocalManifest: () => using(getUpdater(), _ => _.call('getLocalManifest')),
          getState: () => callUpdate(),
          upgrade: () => callUpdate({ upgrade: true }),
        },
      },
    })
  }

  // A proxy can be bound to a unique license
  //
  // Returns the first license reported by the updater (may be undefined).
  getSelfLicense() {
    return using(getUpdater(), _ => _.call('getSelfLicenses').then(licenses => licenses[0]))
  }
}
|
||||
43
@xen-orchestra/proxy/src/app/mixins/authentication.js
Normal file
43
@xen-orchestra/proxy/src/app/mixins/authentication.js
Normal file
@@ -0,0 +1,43 @@
|
||||
import xdg from 'xdg-basedir'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { execFileSync } from 'child_process'
|
||||
import { outputFileSync } from 'fs-extra'
|
||||
|
||||
import { Profile } from '../_Profile'
|
||||
|
||||
const { warn } = createLogger('xo:proxy:authentication')
|
||||
|
||||
// A token is valid when it is a non-empty string.
const isValidToken = token => typeof token === 'string' && token.length > 0
|
||||
|
||||
// Application mixin handling proxy authentication via a single shared token.
//
// The token comes from the configuration; when absent, it is bootstrapped
// from XenStore (written there by the XOA deployment process), persisted
// into an auto-managed config file, then removed from XenStore.
export default class Authentication {
  constructor(_, { appName, config: { authenticationToken: token } }) {
    if (!isValidToken(token)) {
      // token is stored JSON-encoded in XenStore
      token = JSON.parse(execFileSync('xenstore-read', ['vm-data/xo-proxy-authenticationToken']))

      if (!isValidToken(token)) {
        throw new Error('missing authenticationToken in configuration')
      }

      try {
        // save this token in the automatically handled conf file
        outputFileSync(
          // this file must take precedence over normal user config
          `${xdg.config}/${appName}/config.z-auto.json`,
          JSON.stringify({ authenticationToken: token }),
          { mode: 0o600 }
        )
        execFileSync('xenstore-rm', ['vm-data/xo-proxy-authenticationToken'])
      } catch (error) {
        // NOTE(review): this catch also covers the outputFileSync above, so a
        // config-write failure is logged with this XenStore-specific message —
        // confirm whether the try scope should be narrowed.
        warn('failed to remove token from XenStore', { error })
      }
    }

    this._token = token
  }

  // Returns a Profile when the supplied credentials match the proxy token,
  // undefined otherwise (i.e. authentication failure).
  async findProfile(credentials) {
    if (credentials?.authenticationToken === this._token) {
      return new Profile()
    }
  }
}
|
||||
407
@xen-orchestra/proxy/src/app/mixins/backups/index.js
Normal file
407
@xen-orchestra/proxy/src/app/mixins/backups/index.js
Normal file
@@ -0,0 +1,407 @@
|
||||
import defer from 'golike-defer'
|
||||
import Disposable from 'promise-toolbox/Disposable'
|
||||
import fromCallback from 'promise-toolbox/fromCallback'
|
||||
import mapValues from 'lodash/mapValues'
|
||||
import using from 'promise-toolbox/using'
|
||||
import { asyncMap } from '@xen-orchestra/backups/asyncMap'
|
||||
import { Backup } from '@xen-orchestra/backups/Backup'
|
||||
import { compose } from '@vates/compose'
|
||||
import { createLogger } from '@xen-orchestra/log/dist'
|
||||
import { decorateWith } from '@vates/decorate-with'
|
||||
import { deduped } from '@vates/disposable/deduped'
|
||||
import { DurablePartition } from '@xen-orchestra/backups/DurablePartition'
|
||||
import { execFile } from 'child_process'
|
||||
import { formatVmBackup } from '@xen-orchestra/backups/formatVmBackup'
|
||||
import { ImportVmBackup } from '@xen-orchestra/backups/ImportVmBackup'
|
||||
import { Readable } from 'stream'
|
||||
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter'
|
||||
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup'
|
||||
import { Task } from '@xen-orchestra/backups/task'
|
||||
import { Xapi } from '@xen-orchestra/xapi'
|
||||
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const { warn } = createLogger('xo:proxy:backups')
|
||||
|
||||
// Wraps an async `runner(args, onLog)` into an object-mode Readable stream
// of its log entries.
//
// The runner is started lazily on the first read; `this._read = noop`
// replaces the read handler so subsequent reads do not start it again.
// The stream ends when the runner resolves and errors when it rejects.
const runWithLogs = (runner, args) =>
  new Readable({
    objectMode: true,
    read() {
      this._read = noop

      runner(args, log => this.push(log)).then(
        () => this.push(null),
        error => this.emit('error', error)
      )
    },
  })
|
||||
|
||||
// Application mixin exposing the whole backup API of the proxy: running
// backup jobs, listing/deleting/restoring VM and metadata backups, and
// browsing backed-up disks (partitions and files).
export default class Backups {
  constructor(app) {
    this._app = app

    // clean any LVM volumes that might have not been properly
    // unmounted
    app.hooks.on('start', async () => {
      await Promise.all([fromCallback(execFile, 'losetup', ['-D']), fromCallback(execFile, 'vgchange', ['-an'])])
      await fromCallback(execFile, 'pvscan', ['--cache'])
    })

    // Base implementation: run a Backup with the current config and shared
    // adapter/XAPI factories. It is then wrapped (below) with, in order:
    // job-concurrency protection, license check, and optional log streaming.
    let run = ({ xapis, ...rest }) =>
      new Backup({
        ...rest,

        // don't change config during backup execution
        config: app.config.get('backups'),

        // pass getAdapter in order to mutualize the adapter resources usage
        getAdapter: this.getAdapter.bind(this),
        getConnectedXapi: id => this.getXapi(xapis[id]),
      }).run()

    // null prototype: job ids are used directly as keys with `in`
    const runningJobs = { __proto__: null }

    // Wrapper 1: refuse to start a job that is already running.
    run = (run => {
      return async function (params) {
        const jobId = params.job.id
        if (jobId === undefined) {
          return run.apply(this, arguments)
        }
        if (jobId in runningJobs) {
          const error = new Error('job is already running')
          error.jobId = jobId
          throw error
        }
        runningJobs[jobId] = true
        try {
          return await run.apply(this, arguments)
        } finally {
          delete runningJobs[jobId]
        }
      }
    })(run)

    // Wrapper 2: outside of dev builds, require a valid (unexpired) proxy
    // license before running anything.
    run = (run =>
      async function () {
        if (!__DEV__) {
          const license = await app.appliance.getSelfLicense()
          if (license === undefined || license.expires < Date.now()) {
            throw new Error('the proxy license is not valid')
          }
        }
        return run.apply(this, arguments)
      })(run)

    // Wrapper 3: when an `onLog` callback is provided, run inside a Task so
    // all progress/errors are reported through logs instead of being thrown.
    run = (run => async (params, onLog) => {
      if (onLog === undefined) {
        return run(params)
      }

      const { job, schedule } = params
      try {
        await Task.run(
          {
            name: 'backup run',
            data: {
              jobId: job.id,
              jobName: job.name,
              mode: job.mode,
              reportWhen: job.settings['']?.reportWhen,
              scheduleId: schedule.id,
            },
            onLog,
          },
          () => run(params)
        )
      } catch (error) {
        // do not rethrow, everything is handled via logging
      }
    })(run)

    app.api.addMethods({
      backup: {
        deleteMetadataBackup: [
          ({ backupId, remote }) => using(this.getAdapter(remote), adapter => adapter.deleteMetadataBackup(backupId)),
          {
            description: 'delete Metadata backup',
            params: {
              backupId: { type: 'string' },
              remote: { type: 'object' },
            },
          },
        ],
        deleteVmBackup: [
          ({ filename, remote }) => using(this.getAdapter(remote), adapter => adapter.deleteVmBackup(filename)),
          {
            description: 'delete VM backup',
            params: {
              filename: { type: 'string' },
              remote: { type: 'object' },
            },
          },
        ],
        fetchPartitionFiles: [
          ({ disk: diskId, remote, partition: partitionId, paths }) =>
            using(this.getAdapter(remote), adapter => adapter.fetchPartitionFiles(diskId, partitionId, paths)),
          {
            description: 'fetch files from partition',
            params: {
              disk: { type: 'string' },
              partition: { type: 'string', optional: true },
              paths: { type: 'array', items: { type: 'string' } },
              remote: { type: 'object' },
            },
          },
        ],
        importVmBackup: [
          // NOTE(review): `$defer` appears unused here — confirm whether the
          // `defer()` wrapper is still needed.
          defer(($defer, { backupId, remote, srUuid, streamLogs = false, xapi: xapiOpts }) =>
            using(this.getAdapter(remote), this.getXapi(xapiOpts), async (adapter, xapi) => {
              const metadata = await adapter.readVmBackupMetadata(backupId)
              const run = () => new ImportVmBackup({ adapter, metadata, srUuid, xapi }).run()
              return streamLogs
                ? runWithLogs(
                    async (args, onLog) =>
                      Task.run(
                        {
                          data: {
                            jobId: metadata.jobId,
                            srId: srUuid,
                            time: metadata.timestamp,
                          },
                          name: 'restore',
                          onLog,
                        },
                        run
                      ).catch(() => {}) // errors are handled by logs
                  )
                : run()
            })
          ),
          {
            description: 'create a new VM from a backup',
            params: {
              backupId: { type: 'string' },
              remote: { type: 'object' },
              srUuid: { type: 'string' },
              streamLogs: { type: 'boolean', optional: true },
              xapi: { type: 'object' },
            },
          },
        ],
        listDiskPartitions: [
          ({ disk: diskId, remote }) => using(this.getAdapter(remote), adapter => adapter.listPartitions(diskId)),
          {
            description: 'list disk partitions',
            params: {
              disk: { type: 'string' },
              remote: { type: 'object' },
            },
          },
        ],
        listPartitionFiles: [
          ({ disk: diskId, remote, partition: partitionId, path }) =>
            using(this.getAdapter(remote), adapter => adapter.listPartitionFiles(diskId, partitionId, path)),
          {
            description: 'list partition files',
            params: {
              disk: { type: 'string' },
              partition: { type: 'string', optional: true },
              path: { type: 'string' },
              remote: { type: 'object' },
            },
          },
        ],
        listPoolMetadataBackups: [
          // per-remote failures are logged and skipped, not propagated
          async ({ remotes }) => {
            const backupsByRemote = {}
            await asyncMap(Object.entries(remotes), async ([remoteId, remote]) => {
              try {
                await using(this.getAdapter(remote), async adapter => {
                  backupsByRemote[remoteId] = await adapter.listPoolMetadataBackups()
                })
              } catch (error) {
                warn('listPoolMetadataBackups', { error, remote })
              }
            })
            return backupsByRemote
          },
          {
            description: 'list pool metadata backups',
            params: {
              remotes: {
                type: 'object',
                additionalProperties: { type: 'object' },
              },
            },
          },
        ],
        listVmBackups: [
          // per-remote failures are logged and skipped, not propagated
          async ({ remotes }) => {
            const backups = {}
            await asyncMap(Object.keys(remotes), async remoteId => {
              try {
                await using(this.getAdapter(remotes[remoteId]), async adapter => {
                  backups[remoteId] = mapValues(await adapter.listAllVmBackups(), vmBackups =>
                    vmBackups.map(backup => formatVmBackup(backup))
                  )
                })
              } catch (error) {
                warn('listVmBackups', { error, remote: remotes[remoteId] })
              }
            })
            return backups
          },
          {
            description: 'list VM backups',
            params: {
              remotes: {
                type: 'object',
                additionalProperties: { type: 'object' },
              },
            },
          },
        ],
        listRunningJobs: [
          () => Object.keys(runningJobs),
          {
            description: 'returns a list of running jobs',
          },
        ],
        listXoMetadataBackups: [
          // per-remote failures are logged and skipped, not propagated
          async ({ remotes }) => {
            const backupsByRemote = {}
            await asyncMap(Object.entries(remotes), async ([remoteId, remote]) => {
              try {
                await using(this.getAdapter(remote), async adapter => {
                  backupsByRemote[remoteId] = await adapter.listXoMetadataBackups()
                })
              } catch (error) {
                warn('listXoMetadataBackups', { error, remote })
              }
            })
            return backupsByRemote
          },
          {
            description: 'list XO metadata backups',
            params: {
              remotes: {
                type: 'object',
                additionalProperties: { type: 'object' },
              },
            },
          },
        ],
        restoreMetadataBackup: [
          ({ backupId, remote, xapi: xapiOptions }) =>
            using(app.remotes.getHandler(remote), xapiOptions && this.getXapi(xapiOptions), (handler, xapi) =>
              runWithLogs(
                async (args, onLog) =>
                  Task.run(
                    {
                      name: 'metadataRestore',
                      data: JSON.parse(String(await handler.readFile(`${backupId}/metadata.json`))),
                      onLog,
                    },
                    () =>
                      new RestoreMetadataBackup({
                        backupId,
                        handler,
                        xapi,
                      }).run()
                  ).catch(() => {}) // errors are handled by logs
              )
            ),
          {
            description: 'restore a metadata backup',
            params: {
              backupId: { type: 'string' },
              remote: { type: 'object' },
              xapi: { type: 'object', optional: true },
            },
          },
        ],
        run: [
          ({ streamLogs = false, ...rest }) => (streamLogs ? runWithLogs(run, rest) : run(rest)),
          {
            description: 'run a backup job',
            params: {
              job: { type: 'object' },
              remotes: { type: 'object' },
              schedule: { type: 'object' },
              xapis: { type: 'object', optional: true },
              recordToXapi: { type: 'object', optional: true },
              streamLogs: { type: 'boolean', optional: true },
            },
          },
        ],
      },
    })

    // Partitions mounted through mountPartition stay mounted until
    // explicitly unmounted or until the app stops.
    const durablePartition = new DurablePartition()
    app.hooks.once('stop', () => durablePartition.flushAll())

    app.api.addMethods({
      backup: {
        mountPartition: [
          async ({ disk, partition, remote }) =>
            using(this.getAdapter(remote), adapter => durablePartition.mount(adapter, disk, partition)),
          {
            description: 'mount a partition',
            params: {
              disk: { type: 'string' },
              partition: { type: 'string', optional: true },
              remote: { type: 'object' },
            },
          },
        ],
        unmountPartition: [
          async ({ path }) => durablePartition.unmount(path),
          {
            description: 'unmount a partition',
            params: {
              path: { type: 'string' },
            },
          },
        ],
      },
    })
  }

  // Disposable factory yielding a RemoteAdapter for the given remote,
  // deduplicated by remote URL and kept alive briefly after release
  // (debounceResource) to mutualize usage.
  //
  // FIXME: invalidate cache on remote option change
  @decorateWith(compose, function (resource) {
    return this._app.debounceResource(resource)
  })
  @decorateWith(deduped, remote => [remote.url])
  @decorateWith(Disposable.factory)
  *getAdapter(remote) {
    const app = this._app
    return new RemoteAdapter(yield app.remotes.getHandler(remote), {
      debounceResource: app.debounceResource.bind(app),
      dirMode: app.config.get('backups.dirMode'),
    })
  }

  // Disposable factory yielding a connected Xapi client for the given
  // connection options, deduplicated by URL; waits for the initial object
  // fetch before yielding and disconnects on release.
  //
  // FIXME: invalidate cache on options change
  @decorateWith(compose, function (resource) {
    return this._app.debounceResource(resource)
  })
  @decorateWith(deduped, ({ url }) => [url])
  @decorateWith(Disposable.factory)
  async *getXapi({ credentials: { username: user, password }, ...opts }) {
    const xapi = new Xapi({
      ...this._app.config.get('xapiOptions'),
      ...opts,
      auth: {
        user,
        password,
      },
    })

    await xapi.connect()
    try {
      await xapi.objectsFetched

      yield xapi
    } finally {
      await xapi.disconnect()
    }
  }
}
|
||||
71
@xen-orchestra/proxy/src/app/mixins/config.js
Normal file
71
@xen-orchestra/proxy/src/app/mixins/config.js
Normal file
@@ -0,0 +1,71 @@
|
||||
import get from 'lodash/get'
|
||||
import identity from 'lodash/identity'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { parseDuration } from '@vates/parse-duration'
|
||||
import { watch } from 'app-conf'
|
||||
|
||||
const { warn } = createLogger('xo:proxy:config')
|
||||
|
||||
// Application mixin providing read access to the app configuration and
// change notification (hot reload via app-conf's watch).
export default class Config {
  constructor(app, { appDir, appName, config }) {
    this._config = config
    const watchers = (this._watchers = new Set())

    // Start watching config files when the app starts; `watch` returns an
    // unwatch function which is registered as a stop hook.
    app.hooks.on('start', async () => {
      app.hooks.on(
        'stop',
        await watch({ appDir, appName, ignoreUnknownFormats: true }, (error, config) => {
          if (error != null) {
            return warn(error)
          }

          this._config = config
          watchers.forEach(watcher => {
            watcher(config)
          })
        })
      )
    })
  }

  // Returns the config value at `path` (lodash path syntax).
  //
  // Throws a TypeError when the entry is missing: config entries read through
  // this method are considered mandatory.
  get(path) {
    const value = get(this._config, path)
    if (value === undefined) {
      // FIX: report the missing path — `value` is always undefined here,
      // which previously produced 'missing config entry: undefined'
      throw new TypeError('missing config entry: ' + path)
    }
    return value
  }

  // Like get(), but parses the value as a duration (number of milliseconds).
  getDuration(path) {
    return parseDuration(this.get(path))
  }

  // Calls `cb(value)` now and whenever the (processed) value at `path`
  // changes; returns an unsubscribe function.
  watch(path, cb) {
    // internal arg
    const processor = arguments.length > 2 ? arguments[2] : identity

    let prev
    const watcher = config => {
      try {
        const value = processor(get(config, path))
        // only notify on actual changes (strict equality)
        if (value !== prev) {
          prev = value
          cb(value)
        }
      } catch (error) {
        warn('watch', { error, path })
      }
    }

    // ensure sync initialization
    watcher(this._config)

    const watchers = this._watchers
    watchers.add(watcher)
    return () => watchers.delete(watcher)
  }

  // watch() variant parsing the value as a duration before comparison.
  watchDuration(path, cb) {
    return this.watch(path, cb, parseDuration)
  }
}
|
||||
49
@xen-orchestra/proxy/src/app/mixins/hooks.js
Normal file
49
@xen-orchestra/proxy/src/app/mixins/hooks.js
Normal file
@@ -0,0 +1,49 @@
|
||||
import assert from 'assert'
|
||||
import emitAsync from '@xen-orchestra/emit-async'
|
||||
import EventEmitter from 'events'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
const { debug, warn } = createLogger('xo:proxy:hooks')
|
||||
|
||||
// Runs all async listeners registered for `hook` on `emitter` (via
// emitAsync) and waits for them to settle.
//
// Listener failures are logged and do not abort the other listeners or
// reject the returned promise.
const runHook = async (emitter, hook) => {
  debug(`${hook} start…`)
  await emitAsync.call(
    emitter,
    {
      onError: error => warn(`${hook} failure`, { error }),
    },
    hook
  )
  debug(`${hook} finished`)
}
|
||||
|
||||
// Application lifecycle hub: mixins register async listeners for the
// `clean`, `start` and `stop` hooks; status events ('started'/'stopped')
// are emitted once the corresponding hook has fully run.
export default class Hooks extends EventEmitter {
  // Run *clean* async listeners.
  //
  // They normalize existing data, clear invalid entries, etc.
  clean() {
    return runHook(this, 'clean')
  }

  // lifecycle state: 'stopped' → 'starting' → 'started' → 'stopping' → …
  _status = 'stopped'

  // Run *start* async listeners.
  //
  // They initialize the application.
  async start() {
    assert.strictEqual(this._status, 'stopped')
    this._status = 'starting'
    await runHook(this, 'start')
    // updates the status and emits it as an event in one expression
    this.emit((this._status = 'started'))
  }

  // Run *stop* async listeners.
  //
  // They close connections, unmount file systems, save states, etc.
  async stop() {
    assert.strictEqual(this._status, 'started')
    this._status = 'stopping'
    await runHook(this, 'stop')
    // updates the status and emits it as an event in one expression
    this.emit((this._status = 'stopped'))
  }
}
|
||||
56
@xen-orchestra/proxy/src/app/mixins/remotes.js
Normal file
56
@xen-orchestra/proxy/src/app/mixins/remotes.js
Normal file
@@ -0,0 +1,56 @@
|
||||
import Disposable from 'promise-toolbox/Disposable'
|
||||
import using from 'promise-toolbox/using'
|
||||
import { compose } from '@vates/compose'
|
||||
import { decorateWith } from '@vates/decorate-with'
|
||||
import { deduped } from '@vates/disposable/deduped'
|
||||
import { getHandler } from '@xen-orchestra/fs'
|
||||
|
||||
|
||||
// Application mixin providing access to backup remotes (storage backends)
// and exposing remote introspection over the proxy API.
export default class Remotes {
  constructor(app) {
    this._app = app

    app.api.addMethods({
      remote: {
        getInfo: [
          ({ remote }) => using(this.getHandler(remote), handler => handler.getInfo()),
          {
            params: {
              remote: { type: 'object' },
            },
          },
        ],

        test: [
          // never throws: failures are reported as { success: false, error }
          ({ remote }) =>
            using(this.getHandler(remote), handler => handler.test()).catch(error => ({
              success: false,
              error: error.message ?? String(error),
            })),
          {
            params: {
              remote: { type: 'object' },
            },
          },
        ],
      },
    })
  }

  // Disposable yielding a synced fs handler for the given remote,
  // deduplicated by remote URL and released via handler.forget().
  //
  // Local (`file`) remotes can be disabled by configuration.
  //
  // FIXME: invalidate cache on remote option change
  @decorateWith(compose, function (resource) {
    return this._app.debounceResource(resource)
  })
  @decorateWith(deduped, remote => [remote.url])
  async getHandler(remote) {
    const { config } = this._app
    const handler = getHandler(remote, config.get('remoteOptions'))

    if (config.get('remotes.disableFileRemotes') && handler.type === 'file') {
      throw new Error('Local remotes are disabled in proxies')
    }

    await handler.sync()
    return new Disposable(handler, () => handler.forget())
  }
}
|
||||
164
@xen-orchestra/proxy/src/index.js
Executable file
164
@xen-orchestra/proxy/src/index.js
Executable file
@@ -0,0 +1,164 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const APP_NAME = 'xo-proxy'
|
||||
const APP_DIR = require('path').join(__dirname, '..')
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
{
|
||||
const { catchGlobalErrors } = require('@xen-orchestra/log/configure')
|
||||
|
||||
catchGlobalErrors(require('@xen-orchestra/log').createLogger('xo:proxy'))
|
||||
}
|
||||
|
||||
const { fatal, info, warn } = require('@xen-orchestra/log').createLogger('xo:proxy:bootstrap')
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Entry point: parses CLI options, loads configuration, starts the HTTP(S)
// server (generating a self-signed certificate if needed), drops privileges,
// instantiates the App and wires graceful shutdown on SIGINT/SIGTERM.
//
// Resolves when the app has fully stopped.
const main = async args => {
  const opts = require('getopts')(args, {
    boolean: ['help', 'safe-mode'],
    alias: {
      help: ['h'],
    },
  })

  if (opts.help) {
    const { name, version } = require('../package.json')
    // eslint-disable-next-line no-console
    console.log(
      '%s',
      `
${name} v${version}
`
    )
    return
  }

  info('starting')

  const config = await require('app-conf').load(APP_NAME, {
    appDir: APP_DIR,
    ignoreUnknownFormats: true,
  })

  // HTTP/2 (with HTTP/1 fallback) requires Node >= 10.10
  let httpServer = new (require('http-server-plus'))({
    createSecureServer:
      require('compare-versions')(process.version, '10.10.0') >= 0
        ? (({ createSecureServer }) => opts => createSecureServer({ ...opts, allowHTTP1: true }))(require('http2'))
        : undefined,
  })

  const { readFileSync, outputFileSync, unlinkSync } = require('fs-extra')
  const retry = require('promise-toolbox/retry')

  require('lodash/forOwn')(config.http.listen, async ({ autoCert, cert, key, ...opts }) => {
    try {
      const niceAddress = await retry(
        async () => {
          if (cert !== undefined && key !== undefined) {
            try {
              opts.cert = readFileSync(cert)
              opts.key = readFileSync(key)
            } catch (error) {
              // with autoCert, a missing cert/key pair is generated on the fly
              if (!(autoCert && error.code === 'ENOENT')) {
                throw error
              }

              const pems = await require('@xen-orchestra/self-signed').genSelfSignedCert()
              outputFileSync(cert, pems.cert, { flag: 'wx', mode: 0o400 })
              outputFileSync(key, pems.key, { flag: 'wx', mode: 0o400 })
              info('new certificate generated', { cert, key })
              opts.cert = pems.cert
              opts.key = pems.key
            }
          }

          return httpServer.listen(opts)
        },
        {
          // retry once after deleting a certificate rejected as too weak
          tries: 2,
          when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
          onRetry: () => {
            warn('deleting invalid certificate')
            unlinkSync(cert)
            unlinkSync(key)
          },
        }
      )

      info(`Web server listening on ${niceAddress}`)
    } catch (error) {
      if (error.niceAddress !== undefined) {
        warn(`Web server could not listen on ${error.niceAddress}`)

        const { code } = error
        if (code === 'EACCES') {
          warn(' Access denied.')
          warn(' Ports < 1024 are often reserved to privileges users.')
        } else if (code === 'EADDRINUSE') {
          warn(' Address already in use.')
        }
      } else {
        warn('web server could not listen', { error })
      }
    }
  })

  // drop privileges once the (possibly privileged) ports are bound
  const { group, user } = config
  group != null && process.setgid(group)
  user != null && process.setuid(user)

  try {
    // The default value of 10 appears to be too small for interesting traces in xo-proxy.
    Error.stackTraceLimit = 20

    require('source-map-support/register')
  } catch (error) {
    warn(error)
  }

  httpServer = require('stoppable')(httpServer)

  const App = require('./app').default
  const app = new App({
    appDir: APP_DIR,
    appName: APP_NAME,
    config,
    httpServer,
    // FIX: getopts stores parsed flags without the leading dashes, so the
    // previous `opts['--safe-mode']` was always undefined
    safeMode: opts['safe-mode'],
  })

  // dont delay require to stopping phase because deps may no longer be there (eg on uninstall)
  const fromCallback = require('promise-toolbox/fromCallback')
  app.hooks.on('stop', () => fromCallback(cb => httpServer.stop(cb)))

  await app.hooks.start()

  // Gracefully shutdown on signals.
  let alreadyCalled = false
  ;['SIGINT', 'SIGTERM'].forEach(signal => {
    process.on(signal, () => {
      if (alreadyCalled) {
        warn('forced exit')
        process.exit(1)
      }
      alreadyCalled = true

      info(`${signal} caught, stopping…`)
      app.hooks.stop()
    })
  })

  return require('promise-toolbox/fromEvent')(app.hooks, 'stopped')
}
|
||||
// Bootstrap: run main with the CLI arguments; log and exit non-zero on
// any unhandled failure.
main(process.argv.slice(2)).then(
  () => {
    info('bye :-)')
  },
  error => {
    fatal(error)

    process.exit(1)
  }
)
|
||||
1
@xen-orchestra/xapi/.babelrc.js
Normal file
1
@xen-orchestra/xapi/.babelrc.js
Normal file
@@ -0,0 +1 @@
|
||||
// Babel configuration derived from the shared monorepo helper, parameterized
// by this package's own package.json.
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
28
@xen-orchestra/xapi/README.md
Normal file
28
@xen-orchestra/xapi/README.md
Normal file
@@ -0,0 +1,28 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @xen-orchestra/xapi
|
||||
|
||||
[](https://npmjs.org/package/@xen-orchestra/xapi)  [](https://bundlephobia.com/result?p=@xen-orchestra/xapi) [](https://npmjs.org/package/@xen-orchestra/xapi)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/xapi):
|
||||
|
||||
```
|
||||
> npm install --save @xen-orchestra/xapi
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
|
||||
0
@xen-orchestra/xapi/USAGE.md
Normal file
0
@xen-orchestra/xapi/USAGE.md
Normal file
54
@xen-orchestra/xapi/package.json
Normal file
54
@xen-orchestra/xapi/package.json
Normal file
@@ -0,0 +1,54 @@
|
||||
{
|
||||
"name": "@xen-orchestra/xapi",
|
||||
"version": "0.4.1",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/xapi",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"main": "dist/",
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.2.3",
|
||||
"@babel/core": "^7.3.3",
|
||||
"@babel/plugin-proposal-decorators": "^7.3.0",
|
||||
"@babel/preset-env": "^7.3.1",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"xen-api": "^0.29.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"postversion": "npm publish --access public",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"d3-time-format": "^2.2.3",
|
||||
"golike-defer": "^0.4.1",
|
||||
"lodash": "^4.17.15",
|
||||
"make-error": "^1.3.5",
|
||||
"promise-toolbox": "^0.16.0"
|
||||
},
|
||||
"private": false,
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
}
|
||||
}
|
||||
7
@xen-orchestra/xapi/src/_AttachedVdiError.js
Normal file
7
@xen-orchestra/xapi/src/_AttachedVdiError.js
Normal file
@@ -0,0 +1,7 @@
|
||||
const { BaseError } = require('make-error')

// Error thrown when an operation requires a detached VDI but the VDI is
// currently attached (e.g. to a running VM).
module.exports = class AttachedVdiError extends BaseError {
  constructor() {
    super('this VDI is currently attached')
  }
}
|
||||
9
@xen-orchestra/xapi/src/_extractOpaqueRef.js
Normal file
9
@xen-orchestra/xapi/src/_extractOpaqueRef.js
Normal file
@@ -0,0 +1,9 @@
|
||||
const OPAQUE_REF_RE = /OpaqueRef:[0-9a-z-]+/
|
||||
|
||||
module.exports = str => {
|
||||
const matches = OPAQUE_REF_RE.exec(str)
|
||||
if (!matches) {
|
||||
throw new Error('no opaque ref found')
|
||||
}
|
||||
return matches[0]
|
||||
}
|
||||
3
@xen-orchestra/xapi/src/_isValidRef.js
Normal file
3
@xen-orchestra/xapi/src/_isValidRef.js
Normal file
@@ -0,0 +1,3 @@
|
||||
const { NULL_REF, isOpaqueRef } = require('xen-api')
|
||||
|
||||
module.exports = ref => ref !== NULL_REF && isOpaqueRef(ref)
|
||||
7
@xen-orchestra/xapi/src/_isVmRunning.js
Normal file
7
@xen-orchestra/xapi/src/_isVmRunning.js
Normal file
@@ -0,0 +1,7 @@
|
||||
const RUNNING_POWER_STATES = {
|
||||
Running: true,
|
||||
Paused: true,
|
||||
}
|
||||
|
||||
module.exports = vmOrPowerState =>
|
||||
(typeof vmOrPowerState === 'string' ? vmOrPowerState : vmOrPowerState.power_state) in RUNNING_POWER_STATES
|
||||
119
@xen-orchestra/xapi/src/index.js
Normal file
119
@xen-orchestra/xapi/src/index.js
Normal file
@@ -0,0 +1,119 @@
|
||||
const assert = require('assert')
|
||||
const defer = require('promise-toolbox/defer')
|
||||
const { utcFormat, utcParse } = require('d3-time-format')
|
||||
const { Xapi: Base } = require('xen-api')
|
||||
|
||||
// VDI formats. (Raw is not available for delta vdi.)
|
||||
exports.VDI_FORMAT_RAW = 'raw'
|
||||
exports.VDI_FORMAT_VHD = 'vhd'
|
||||
|
||||
// Format a date (pseudo ISO 8601) from one XenServer get by
|
||||
// xapi.call('host.get_servertime', host.$ref) for example
|
||||
exports.formatDateTime = utcFormat('%Y%m%dT%H:%M:%SZ')
|
||||
|
||||
const parseDateTimeHelper = utcParse('%Y%m%dT%H:%M:%SZ')
|
||||
exports.parseDateTime = function (str, defaultValue) {
|
||||
const date = parseDateTimeHelper(str)
|
||||
if (date === null) {
|
||||
if (arguments.length > 1) {
|
||||
return defaultValue
|
||||
}
|
||||
throw new RangeError(`unable to parse XAPI datetime ${JSON.stringify(str)}`)
|
||||
}
|
||||
return date.getTime()
|
||||
}
|
||||
|
||||
const hasProps = o => {
|
||||
// eslint-disable-next-line no-unreachable-loop
|
||||
for (const key in o) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
class Xapi extends Base {
|
||||
constructor({ ignoreNobakVdis, maxUncoalescedVdis, ...opts }) {
|
||||
assert.notStrictEqual(ignoreNobakVdis, undefined)
|
||||
|
||||
super(opts)
|
||||
this._ignoreNobakVdis = ignoreNobakVdis
|
||||
this._maxUncoalescedVdis = maxUncoalescedVdis
|
||||
|
||||
const genericWatchers = (this._genericWatchers = new Set())
|
||||
const objectWatchers = (this._objectWatchers = { __proto__: null })
|
||||
|
||||
const onAddOrUpdate = records => {
|
||||
if (genericWatchers.size === 0 && !hasProps(objectWatchers)) {
|
||||
// no need to process records
|
||||
return
|
||||
}
|
||||
|
||||
Object.keys(records).forEach(id => {
|
||||
const object = records[id]
|
||||
|
||||
genericWatchers.forEach(watcher => {
|
||||
watcher(object)
|
||||
})
|
||||
|
||||
if (id in objectWatchers) {
|
||||
objectWatchers[id].resolve(object)
|
||||
delete objectWatchers[id]
|
||||
}
|
||||
const ref = object.$ref
|
||||
if (ref in objectWatchers) {
|
||||
objectWatchers[ref].resolve(object)
|
||||
delete objectWatchers[ref]
|
||||
}
|
||||
})
|
||||
}
|
||||
this.objects.on('add', onAddOrUpdate)
|
||||
this.objects.on('update', onAddOrUpdate)
|
||||
}
|
||||
|
||||
_waitObject(predicate) {
|
||||
if (typeof predicate === 'function') {
|
||||
const genericWatchers = this._genericWatchers
|
||||
|
||||
const { promise, resolve } = defer()
|
||||
genericWatchers.add(function watcher(obj) {
|
||||
if (predicate(obj)) {
|
||||
genericWatchers.delete(watcher)
|
||||
resolve(obj)
|
||||
}
|
||||
})
|
||||
return promise
|
||||
}
|
||||
|
||||
let watcher = this._objectWatchers[predicate]
|
||||
if (watcher === undefined) {
|
||||
watcher = this._objectWatchers[predicate] = defer()
|
||||
}
|
||||
return watcher.promise
|
||||
}
|
||||
}
|
||||
function mixin(mixins) {
|
||||
const xapiProto = Xapi.prototype
|
||||
const { defineProperties, getOwnPropertyDescriptor, getOwnPropertyNames } = Object
|
||||
const descriptors = { __proto__: null }
|
||||
Object.keys(mixins).forEach(prefix => {
|
||||
const mixinProto = mixins[prefix].prototype
|
||||
getOwnPropertyNames(mixinProto)
|
||||
.filter(_ => _ !== 'constructor')
|
||||
.forEach(name => {
|
||||
const key = name[0] === '_' ? name : `${prefix}_${name}`
|
||||
|
||||
assert(!(key in descriptors), `${key} is already defined`)
|
||||
|
||||
descriptors[key] = getOwnPropertyDescriptor(mixinProto, name)
|
||||
})
|
||||
})
|
||||
defineProperties(xapiProto, descriptors)
|
||||
}
|
||||
mixin({
|
||||
task: require('./task'),
|
||||
VBD: require('./vbd'),
|
||||
VDI: require('./vdi'),
|
||||
VIF: require('./vif'),
|
||||
VM: require('./vm'),
|
||||
})
|
||||
exports.Xapi = Xapi
|
||||
13
@xen-orchestra/xapi/src/task.js
Normal file
13
@xen-orchestra/xapi/src/task.js
Normal file
@@ -0,0 +1,13 @@
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
|
||||
module.exports = class Task {
|
||||
create(name = 'untitled task', description) {
|
||||
return this.createTask(`[XO] ${name}`, description)
|
||||
}
|
||||
|
||||
destroy(ref) {
|
||||
// pending task cannot be destroyed
|
||||
ignoreErrors.call(this.call('task.set_status', ref, 'cancelled'))
|
||||
return this.call('task.destroy', ref)
|
||||
}
|
||||
}
|
||||
89
@xen-orchestra/xapi/src/vbd.js
Normal file
89
@xen-orchestra/xapi/src/vbd.js
Normal file
@@ -0,0 +1,89 @@
|
||||
const identity = require('lodash/identity')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
|
||||
const isValidRef = require('./_isValidRef')
|
||||
const isVmRunning = require('./_isVmRunning')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
module.exports = class Vbd {
|
||||
async create({
|
||||
bootable = false,
|
||||
currently_attached = false,
|
||||
device = '',
|
||||
other_config = {},
|
||||
qos_algorithm_params = {},
|
||||
qos_algorithm_type = '',
|
||||
type = 'Disk',
|
||||
unpluggable = false,
|
||||
userdevice,
|
||||
VDI,
|
||||
VM,
|
||||
|
||||
empty = !isValidRef(VDI),
|
||||
mode = type === 'Disk' ? 'RW' : 'RO',
|
||||
}) {
|
||||
if (userdevice == null) {
|
||||
const allowed = await this.call('VM.get_allowed_VBD_devices', VM)
|
||||
const { length } = allowed
|
||||
if (length === 0) {
|
||||
throw new Error('no allowed VBD devices')
|
||||
}
|
||||
|
||||
if (type === 'CD') {
|
||||
// Choose position 3 if allowed.
|
||||
userdevice = allowed.includes('3') ? '3' : allowed[0]
|
||||
} else {
|
||||
userdevice = allowed[0]
|
||||
|
||||
// Avoid userdevice 3 if possible.
|
||||
if (userdevice === '3' && length > 1) {
|
||||
userdevice = allowed[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const powerState = await this.getField('VM', VM, 'power_state')
|
||||
const ifVmSuspended = powerState === 'Suspended' ? identity : noop
|
||||
|
||||
// By default a VBD is unpluggable.
|
||||
const vbdRef = await this.call('VBD.create', {
|
||||
bootable,
|
||||
currently_attached: ifVmSuspended(currently_attached),
|
||||
device: ifVmSuspended(device),
|
||||
empty,
|
||||
mode,
|
||||
other_config,
|
||||
qos_algorithm_params,
|
||||
qos_algorithm_type,
|
||||
type,
|
||||
unpluggable,
|
||||
userdevice,
|
||||
VDI,
|
||||
VM,
|
||||
})
|
||||
|
||||
if (isVmRunning(powerState)) {
|
||||
await this.callAsync('VBD.plug', vbdRef)
|
||||
}
|
||||
}
|
||||
|
||||
async unplug(ref) {
|
||||
// TODO: check if VBD is attached before
|
||||
try {
|
||||
await this.call('VBD.unplug_force', ref)
|
||||
} catch (error) {
|
||||
if (error.code !== 'VBD_NOT_UNPLUGGABLE') {
|
||||
throw error
|
||||
}
|
||||
|
||||
await this.setField('VBD', ref, 'unpluggable', true)
|
||||
await this.call('VBD.unplug_force', ref)
|
||||
}
|
||||
}
|
||||
|
||||
async destroy(ref) {
|
||||
await ignoreErrors.call(this.VBD_unplug(ref))
|
||||
await this.call('VBD.destroy', ref)
|
||||
}
|
||||
}
|
||||
101
@xen-orchestra/xapi/src/vdi.js
Normal file
101
@xen-orchestra/xapi/src/vdi.js
Normal file
@@ -0,0 +1,101 @@
|
||||
const CancelToken = require('promise-toolbox/CancelToken')
|
||||
|
||||
const extractOpaqueRef = require('./_extractOpaqueRef')
|
||||
|
||||
module.exports = class Vdi {
|
||||
async clone(vdiRef) {
|
||||
return extractOpaqueRef(await this.callAsync('VDI.clone', vdiRef))
|
||||
}
|
||||
|
||||
async destroy(vdiRef) {
|
||||
await this.callAsync('VDI.destroy', vdiRef)
|
||||
}
|
||||
|
||||
async create(
|
||||
{
|
||||
name_description,
|
||||
name_label,
|
||||
other_config = {},
|
||||
read_only = false,
|
||||
sharable = false,
|
||||
sm_config,
|
||||
SR,
|
||||
tags,
|
||||
type = 'user',
|
||||
virtual_size,
|
||||
xenstore_data,
|
||||
},
|
||||
{
|
||||
// blindly copying `sm_config` from another VDI can create problems,
|
||||
// therefore it is ignored by default by this method
|
||||
//
|
||||
// see https://github.com/vatesfr/xen-orchestra/issues/4482
|
||||
setSmConfig = false,
|
||||
} = {}
|
||||
) {
|
||||
return this.call('VDI.create', {
|
||||
name_description,
|
||||
name_label,
|
||||
other_config,
|
||||
read_only,
|
||||
sharable,
|
||||
sm_config: setSmConfig ? sm_config : undefined,
|
||||
SR,
|
||||
tags,
|
||||
type,
|
||||
virtual_size: virtual_size,
|
||||
xenstore_data,
|
||||
})
|
||||
}
|
||||
|
||||
async exportContent(ref, { baseRef, cancelToken = CancelToken.none, format }) {
|
||||
const query = {
|
||||
format,
|
||||
vdi: ref,
|
||||
}
|
||||
if (baseRef !== undefined) {
|
||||
query.base = baseRef
|
||||
}
|
||||
try {
|
||||
return await this.getResource(cancelToken, '/export_raw_vdi/', {
|
||||
query,
|
||||
task: await this.task_create(`Exporting content of VDI ${await this.getField('VDI', ref, 'name_label')}`),
|
||||
})
|
||||
} catch (error) {
|
||||
// augment the error with as much relevant info as possible
|
||||
const [poolMaster, vdi] = await Promise.all([
|
||||
this.getRecord('host', this.pool.master),
|
||||
this.getRecord('VDI', ref),
|
||||
])
|
||||
error.pool_master = poolMaster
|
||||
error.SR = await this.getRecord('SR', vdi.SR)
|
||||
error.VDI = vdi
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async importContent(ref, stream, { cancelToken = CancelToken.none, format }) {
|
||||
if (stream.length === undefined) {
|
||||
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
|
||||
}
|
||||
try {
|
||||
await this.putResource(cancelToken, stream, '/import_raw_vdi/', {
|
||||
query: {
|
||||
format,
|
||||
vdi: ref,
|
||||
},
|
||||
task: await this.task_create(`Importing content into VDI ${await this.getField('VDI', ref, 'name_label')}`),
|
||||
})
|
||||
} catch (error) {
|
||||
// augment the error with as much relevant info as possible
|
||||
const [poolMaster, vdi] = await Promise.all([
|
||||
this.getRecord('host', this.pool.master),
|
||||
this.getRecord('VDI', ref),
|
||||
])
|
||||
error.pool_master = poolMaster
|
||||
error.SR = await this.getRecord('SR', vdi.SR)
|
||||
error.VDI = vdi
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
46
@xen-orchestra/xapi/src/vif.js
Normal file
46
@xen-orchestra/xapi/src/vif.js
Normal file
@@ -0,0 +1,46 @@
|
||||
const isVmRunning = require('./_isVmRunning')
|
||||
|
||||
module.exports = class Vif {
|
||||
async create({
|
||||
currently_attached = true,
|
||||
device,
|
||||
ipv4_allowed,
|
||||
ipv6_allowed,
|
||||
locking_mode,
|
||||
MAC,
|
||||
MTU,
|
||||
network,
|
||||
other_config = {},
|
||||
qos_algorithm_params = {},
|
||||
qos_algorithm_type = '',
|
||||
VM,
|
||||
}) {
|
||||
const [powerState, ...rest] = await Promise.all([
|
||||
this.getField('VM', VM, 'power_state'),
|
||||
device ?? (await this.call('VM.get_allowed_VIF_devices', VM))[0],
|
||||
MTU ?? (await this.getField('network', network, 'MTU')),
|
||||
])
|
||||
;[device, MTU] = rest
|
||||
|
||||
const vifRef = await this.call('VIF.create', {
|
||||
currently_attached: powerState === 'Suspended' ? currently_attached : undefined,
|
||||
device,
|
||||
ipv4_allowed,
|
||||
ipv6_allowed,
|
||||
locking_mode,
|
||||
MAC,
|
||||
MTU,
|
||||
network,
|
||||
other_config,
|
||||
qos_algorithm_params,
|
||||
qos_algorithm_type,
|
||||
VM,
|
||||
})
|
||||
|
||||
if (currently_attached && isVmRunning(powerState)) {
|
||||
await this.callAsync('VIF.plug', vifRef)
|
||||
}
|
||||
|
||||
return vifRef
|
||||
}
|
||||
}
|
||||
490
@xen-orchestra/xapi/src/vm.js
Normal file
490
@xen-orchestra/xapi/src/vm.js
Normal file
@@ -0,0 +1,490 @@
|
||||
const asyncMap = require('@xen-orchestra/async-map').default
|
||||
const cancelable = require('promise-toolbox/cancelable')
|
||||
const defer = require('golike-defer').default
|
||||
const groupBy = require('lodash/groupBy')
|
||||
const pickBy = require('lodash/pickBy')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const pRetry = require('promise-toolbox/retry')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { NULL_REF } = require('xen-api')
|
||||
|
||||
const AttachedVdiError = require('./_AttachedVdiError')
|
||||
const extractOpaqueRef = require('./_extractOpaqueRef')
|
||||
const isValidRef = require('./_isValidRef')
|
||||
const isVmRunning = require('./_isVmRunning')
|
||||
|
||||
const { warn } = createLogger('xo:xapi:vm')
|
||||
|
||||
const BIOS_STRINGS_KEYS = new Set([
|
||||
'baseboard-asset-tag',
|
||||
'baseboard-location-in-chassis',
|
||||
'baseboard-manufacturer',
|
||||
'baseboard-product-name',
|
||||
'baseboard-serial-number',
|
||||
'baseboard-version',
|
||||
'bios-vendor',
|
||||
'bios-version',
|
||||
'enclosure-asset-tag',
|
||||
'system-manufacturer',
|
||||
'system-product-name',
|
||||
'system-serial-number',
|
||||
'system-version',
|
||||
])
|
||||
const cleanBiosStrings = biosStrings => {
|
||||
if (biosStrings !== undefined) {
|
||||
biosStrings = pickBy(biosStrings, (value, key) => value !== '' && BIOS_STRINGS_KEYS.has(key))
|
||||
|
||||
if (Object.keys(biosStrings).length !== 0) {
|
||||
return biosStrings
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function safeGetRecord(xapi, type, ref) {
|
||||
try {
|
||||
return await xapi.getRecord(type, ref)
|
||||
} catch (_) {
|
||||
return ref
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = class Vm {
|
||||
async _assertHealthyVdiChain(vdiRefOrUuid, cache, tolerance) {
|
||||
let vdi = cache[vdiRefOrUuid]
|
||||
if (vdi === undefined) {
|
||||
try {
|
||||
vdi = await this[vdiRefOrUuid.startsWith('OpaqueRef:') ? 'getRecord' : 'getRecordByUuid']('VDI', vdiRefOrUuid)
|
||||
} catch (error) {
|
||||
warn(error)
|
||||
return
|
||||
}
|
||||
cache[vdi.$ref] = vdi
|
||||
cache[vdi.uuid] = vdi
|
||||
}
|
||||
|
||||
if (!vdi.managed) {
|
||||
const srRef = vdi.SR
|
||||
let childrenMap = cache[srRef]
|
||||
if (childrenMap === undefined) {
|
||||
const vdiRefs = await this.getField('SR', srRef, 'VDIs')
|
||||
childrenMap = groupBy(
|
||||
(
|
||||
await Promise.all(
|
||||
vdiRefs.map(async vdiRef => {
|
||||
let vdi = cache[vdiRef]
|
||||
if (vdi === undefined) {
|
||||
try {
|
||||
vdi = await this.getRecord('VDI', vdiRef)
|
||||
} catch (error) {
|
||||
warn(error)
|
||||
return
|
||||
}
|
||||
cache[vdiRef] = vdi
|
||||
cache[vdi.uuid] = vdi
|
||||
}
|
||||
return vdi
|
||||
})
|
||||
)
|
||||
).filter(_ => _ !== undefined),
|
||||
vdi => vdi.sm_config['vhd-parent']
|
||||
)
|
||||
}
|
||||
|
||||
// an unmanaged VDI should not have exactly one child: they
|
||||
// should coalesce
|
||||
const children = childrenMap[vdi.uuid]
|
||||
if (
|
||||
children.length === 1 &&
|
||||
!children[0].managed && // some SRs do not coalesce the leaf
|
||||
tolerance-- <= 0
|
||||
) {
|
||||
throw new Error('unhealthy VDI chain')
|
||||
}
|
||||
}
|
||||
|
||||
const parentUuid = vdi.sm_config['vhd-parent']
|
||||
if (parentUuid !== undefined) {
|
||||
return this._assertHealthyVdiChain(parentUuid, cache, tolerance)
|
||||
}
|
||||
}
|
||||
|
||||
async assertHealthyVdiChains(vmRef, tolerance = this._maxUncoalescedVdis) {
|
||||
const vdiRefs = {}
|
||||
;(await this.getRecords('VBD', await this.getField('VM', vmRef, 'VBDs'))).forEach(({ VDI: ref }) => {
|
||||
if (isValidRef(ref)) {
|
||||
vdiRefs[ref] = true
|
||||
}
|
||||
})
|
||||
const cache = { __proto__: null }
|
||||
for (const vdiRef of Object.keys(vdiRefs)) {
|
||||
await this._assertHealthyVdiChain(vdiRef, cache, tolerance)
|
||||
}
|
||||
}
|
||||
|
||||
@cancelable
|
||||
async checkpoint($cancelToken, vmRef, nameLabel) {
|
||||
if (nameLabel === undefined) {
|
||||
nameLabel = await this.getField('VM', vmRef, 'name_label')
|
||||
}
|
||||
try {
|
||||
return await this.callAsync($cancelToken, 'VM.checkpoint', vmRef, nameLabel).then(extractOpaqueRef)
|
||||
} catch (error) {
|
||||
if (error.code === 'VM_BAD_POWER_STATE') {
|
||||
return this.VM_snapshot($cancelToken, vmRef, nameLabel)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
@defer
|
||||
async create(
|
||||
$defer,
|
||||
{
|
||||
actions_after_crash = 'reboot',
|
||||
actions_after_reboot = 'reboot',
|
||||
actions_after_shutdown = 'destroy',
|
||||
affinity = NULL_REF,
|
||||
appliance,
|
||||
blocked_operations,
|
||||
domain_type,
|
||||
generation_id,
|
||||
ha_restart_priority,
|
||||
hardware_platform_version,
|
||||
has_vendor_device = false, // Avoid issue with some Dundee builds.
|
||||
HVM_boot_params,
|
||||
HVM_boot_policy,
|
||||
HVM_shadow_multiplier,
|
||||
is_a_template = false,
|
||||
is_vmss_snapshot,
|
||||
last_boot_CPU_flags, // Used when the VM is created Suspended
|
||||
last_booted_record, // Used when the VM is created Suspended
|
||||
memory_static_max,
|
||||
memory_static_min,
|
||||
name_description,
|
||||
name_label,
|
||||
// NVRAM, // experimental
|
||||
order,
|
||||
other_config = {},
|
||||
PCI_bus = '',
|
||||
platform,
|
||||
PV_args,
|
||||
PV_bootloader_args,
|
||||
PV_bootloader,
|
||||
PV_kernel,
|
||||
PV_legacy_args,
|
||||
PV_ramdisk,
|
||||
recommendations,
|
||||
reference_label,
|
||||
shutdown_delay,
|
||||
snapshot_schedule,
|
||||
start_delay,
|
||||
suspend_SR,
|
||||
tags,
|
||||
user_version,
|
||||
VCPUs_at_startup,
|
||||
VCPUs_max,
|
||||
VCPUs_params,
|
||||
version,
|
||||
xenstore_data,
|
||||
|
||||
memory_dynamic_max = memory_static_max,
|
||||
memory_dynamic_min = memory_static_min,
|
||||
},
|
||||
{
|
||||
// not supported by `VM.create`, therefore it should be passed explicitly
|
||||
bios_strings,
|
||||
|
||||
// if set, will create the VM in Suspended power_state with this VDI
|
||||
//
|
||||
// it's a separate param because it's not supported for all versions of
|
||||
// XCP-ng/XenServer and should be passed explicitly
|
||||
suspend_VDI,
|
||||
} = {}
|
||||
) {
|
||||
const ref = await this.call('VM.create', {
|
||||
actions_after_crash,
|
||||
actions_after_reboot,
|
||||
actions_after_shutdown,
|
||||
affinity,
|
||||
HVM_boot_params,
|
||||
HVM_boot_policy,
|
||||
is_a_template,
|
||||
memory_dynamic_max,
|
||||
memory_dynamic_min,
|
||||
memory_static_max,
|
||||
memory_static_min,
|
||||
other_config,
|
||||
PCI_bus,
|
||||
platform,
|
||||
PV_args,
|
||||
PV_bootloader_args,
|
||||
PV_bootloader,
|
||||
PV_kernel,
|
||||
PV_legacy_args,
|
||||
PV_ramdisk,
|
||||
recommendations,
|
||||
user_version,
|
||||
VCPUs_at_startup,
|
||||
VCPUs_max,
|
||||
VCPUs_params,
|
||||
|
||||
// Optional fields.
|
||||
appliance,
|
||||
blocked_operations,
|
||||
domain_type,
|
||||
generation_id,
|
||||
ha_restart_priority,
|
||||
hardware_platform_version,
|
||||
has_vendor_device,
|
||||
HVM_shadow_multiplier,
|
||||
is_vmss_snapshot,
|
||||
name_description,
|
||||
name_label,
|
||||
order,
|
||||
reference_label,
|
||||
shutdown_delay,
|
||||
snapshot_schedule,
|
||||
start_delay,
|
||||
suspend_SR,
|
||||
tags,
|
||||
version,
|
||||
xenstore_data,
|
||||
|
||||
// VM created Suspended
|
||||
last_boot_CPU_flags,
|
||||
last_booted_record,
|
||||
power_state: suspend_VDI !== undefined ? 'Suspended' : undefined,
|
||||
suspend_VDI,
|
||||
})
|
||||
$defer.onFailure.call(this, 'VM.destroy', ref)
|
||||
|
||||
bios_strings = cleanBiosStrings(bios_strings)
|
||||
if (bios_strings !== undefined) {
|
||||
await this.call('VM.set_bios_strings', ref, bios_strings)
|
||||
}
|
||||
|
||||
return ref
|
||||
}
|
||||
|
||||
async destroy(vmRef, { deleteDisks = true, force = false, forceDeleteDefaultTemplate = false } = {}) {
|
||||
const vm = await this.getRecord('VM', vmRef)
|
||||
if (!force && 'destroy' in vm.blocked_operations) {
|
||||
throw new Error('destroy is blocked')
|
||||
}
|
||||
if (!forceDeleteDefaultTemplate && vm.other_config.default_template === 'true') {
|
||||
throw new Error('VM is default template')
|
||||
}
|
||||
// It is necessary for suspended VMs to be shut down
|
||||
// to be able to delete their VDIs.
|
||||
if (vm.power_state !== 'Halted') {
|
||||
await this.call('VM.hard_shutdown', vmRef)
|
||||
}
|
||||
await Promise.all([
|
||||
vm.set_is_a_template(false),
|
||||
vm.update_blocked_operations('destroy', null),
|
||||
vm.update_other_config('default_template', null),
|
||||
])
|
||||
// must be done before destroying the VM
|
||||
const disks = (
|
||||
await asyncMap(this.getRecords('VBD', vm.VBDs), async vbd => {
|
||||
let vdiRef
|
||||
if (vbd.type === 'Disk' && isValidRef((vdiRef = vbd.VDI))) {
|
||||
return vdiRef
|
||||
}
|
||||
})
|
||||
).filter(_ => _ !== undefined)
|
||||
// this cannot be done in parallel, otherwise disks and snapshots will be
|
||||
// destroyed even if this fails
|
||||
await this.call('VM.destroy', vmRef)
|
||||
return Promise.all([
|
||||
ignoreErrors.call(asyncMap(vm.snapshots, _ => this.VM_destroy(_))),
|
||||
deleteDisks &&
|
||||
ignoreErrors.call(
|
||||
asyncMap(disks, vdiRef =>
|
||||
pRetry(
|
||||
async () => {
|
||||
// list VMs connected to this VDI
|
||||
const vmRefs = await asyncMap(this.getField('VDI', vdiRef, 'VBDs'), vbdRef =>
|
||||
this.getField('VBD', vbdRef, 'VM')
|
||||
)
|
||||
if (vmRefs.every(_ => _ === vmRef)) {
|
||||
return this.callAsync('VDI.destroy', vdiRef)
|
||||
}
|
||||
throw new AttachedVdiError()
|
||||
},
|
||||
{
|
||||
delay: 5e3,
|
||||
tries: 2,
|
||||
when: AttachedVdiError,
|
||||
}
|
||||
)
|
||||
)
|
||||
),
|
||||
])
|
||||
}
|
||||
|
||||
@cancelable
|
||||
@defer
|
||||
async export($defer, $cancelToken, vmRef, { compress = false, useSnapshot } = {}) {
|
||||
const vm = await this.getRecord('VM', vmRef)
|
||||
const taskRef = await this.task_create('VM export', vm.name_label)
|
||||
$defer.onFailure.call(this, 'task_destroy', taskRef)
|
||||
if (useSnapshot === undefined) {
|
||||
useSnapshot = isVmRunning(vm)
|
||||
}
|
||||
const exportedVmRef = useSnapshot
|
||||
? await this.VM_snapshot($cancelToken, vmRef, `[XO Export] ${vm.name_label}`)
|
||||
: vmRef
|
||||
try {
|
||||
return await this.getResource($cancelToken, '/export/', {
|
||||
query: {
|
||||
ref: exportedVmRef,
|
||||
use_compression: compress === 'zstd' ? 'zstd' : compress === true || compress === 'gzip' ? 'true' : 'false',
|
||||
},
|
||||
task: taskRef,
|
||||
})
|
||||
} catch (error) {
|
||||
// augment the error with as much relevant info as possible
|
||||
const [poolMaster, exportedVm] = await Promise.all([
|
||||
safeGetRecord(this, 'host', this.pool.master),
|
||||
useSnapshot ? safeGetRecord(this, 'VM', exportedVmRef) : vmRef,
|
||||
])
|
||||
error.pool_master = poolMaster
|
||||
error.VM = exportedVm
|
||||
throw error
|
||||
} finally {
|
||||
}
|
||||
// if (useSnapshot) {
|
||||
// const destroySnapshot = () => this.deleteVm(exportedVm)::ignoreErrors()
|
||||
// promise.then(_ => _.task::pFinally(destroySnapshot), destroySnapshot)
|
||||
// }
|
||||
//
|
||||
// return promise
|
||||
}
|
||||
|
||||
async getDisks(vmRef) {
|
||||
const disks = { __proto__: null }
|
||||
;(await this.getRecords('VBD', await this.getField('VM', vmRef, 'VBDs'))).map(async vbd => {
|
||||
if (vbd.type === 'Disk' && isValidRef(vbd.VDI)) {
|
||||
disks[vbd.VDI] = true
|
||||
}
|
||||
})
|
||||
return Object.keys(disks)
|
||||
}
|
||||
|
||||
async import(stream, srRef, onVmCreation = undefined) {
|
||||
const taskRef = await this.task_create('VM import')
|
||||
const query = {}
|
||||
if (srRef !== undefined) {
|
||||
query.sr_id = srRef
|
||||
}
|
||||
if (onVmCreation != null) {
|
||||
ignoreErrors.call(
|
||||
this._waitObject(
|
||||
obj => obj != null && obj.current_operations != null && taskRef in obj.current_operations
|
||||
).then(onVmCreation)
|
||||
)
|
||||
}
|
||||
try {
|
||||
return await this.putResource(stream, '/import/', {
|
||||
query,
|
||||
task: taskRef,
|
||||
}).then(extractOpaqueRef)
|
||||
} catch (error) {
|
||||
// augment the error with as much relevant info as possible
|
||||
const [poolMaster, sr] = await Promise.all([
|
||||
safeGetRecord(this, 'host', this.pool.master),
|
||||
safeGetRecord(this, 'SR', srRef),
|
||||
])
|
||||
error.pool_master = poolMaster
|
||||
error.SR = sr
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
@defer
|
||||
@cancelable
|
||||
async snapshot($cancelToken, $defer, vmRef, nameLabel) {
|
||||
const vm = await this.getRecord('VM', vmRef)
|
||||
// cannot unplug VBDs on Running, Paused and Suspended VMs
|
||||
if (vm.power_state === 'Halted' && this._ignoreNobakVdis) {
|
||||
await asyncMap(vm.VBDs, async vbdRef => {
|
||||
const vbd = await this.getRecord('VBD', vbdRef)
|
||||
if (
|
||||
vbd.type === 'Disk' &&
|
||||
isValidRef(vbd.VDI) &&
|
||||
(await this.getField('VDI', vbd.VDI, 'name_label')).startsWith('[NOBAK]')
|
||||
) {
|
||||
await this.VBD_destroy(vbdRef)
|
||||
$defer.call(this, 'VBD_create', vbd)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (nameLabel === undefined) {
|
||||
nameLabel = vm.name_label
|
||||
}
|
||||
let ref
|
||||
do {
|
||||
if (!vm.tags.includes('xo-disable-quiesce')) {
|
||||
try {
|
||||
ref = await pRetry(
|
||||
async bail => {
|
||||
try {
|
||||
return await this.callAsync($cancelToken, 'VM.snapshot_with_quiesce', vmRef, nameLabel)
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'VM_SNAPSHOT_WITH_QUIESCE_FAILED') {
|
||||
throw bail(error)
|
||||
}
|
||||
// detect and remove new broken snapshots
|
||||
//
|
||||
// see https://github.com/vatesfr/xen-orchestra/issues/3936
|
||||
const prevSnapshotRefs = new Set(vm.snapshots)
|
||||
const snapshotNameLabelPrefix = `Snapshot of ${vm.uuid} [`
|
||||
await vm.refresh_snapshots()
|
||||
const createdSnapshots = (
|
||||
await this.getRecords(
|
||||
'VM',
|
||||
vm.snapshots.filter(_ => !prevSnapshotRefs.has(_))
|
||||
)
|
||||
).filter(_ => _.name_label.startsWith(snapshotNameLabelPrefix))
|
||||
// be safe: only delete if there was a single match
|
||||
if (createdSnapshots.length === 1) {
|
||||
ignoreErrors.call(this.VM_destroy(createdSnapshots[0]))
|
||||
}
|
||||
throw error
|
||||
}
|
||||
},
|
||||
{
|
||||
delay: 60e3,
|
||||
tries: 3,
|
||||
}
|
||||
).then(extractOpaqueRef)
|
||||
ignoreErrors.call(this.call('VM.add_tags', ref, 'quiesce'))
|
||||
break
|
||||
} catch (error) {
|
||||
const { code } = error
|
||||
if (
|
||||
// removed in CH 8.1
|
||||
code !== 'MESSAGE_REMOVED' &&
|
||||
code !== 'VM_SNAPSHOT_WITH_QUIESCE_NOT_SUPPORTED' &&
|
||||
// quiesce only work on a running VM
|
||||
code !== 'VM_BAD_POWER_STATE' &&
|
||||
// quiesce failed, fallback on standard snapshot
|
||||
// TODO: emit warning
|
||||
code !== 'VM_SNAPSHOT_WITH_QUIESCE_FAILED'
|
||||
) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
ref = await this.callAsync($cancelToken, 'VM.snapshot', vmRef, nameLabel).then(extractOpaqueRef)
|
||||
} while (false)
|
||||
|
||||
// Don't set `is_a_template = false` like done in xo-server, it does not
|
||||
// appear necessary and can trigger license issues, see
|
||||
// https://bugs.xenserver.org/browse/XSO-766
|
||||
|
||||
return ref
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ You can schedule full backups of your VMs, by exporting them to the local XOA fi
|
||||
:::tip
|
||||
Full backups are space consuming! But they allow a very simple restoration without anything to think of (the file will contain all the VM disks and information). To use less space and data transferred, take a look at the [delta backups](delta_backups.md) feature.
|
||||
:::
|
||||
|
||||
## Backup without snapshot
|
||||
|
||||
In some cases you will need to do a backup without doing a snapshot.
|
||||
@@ -14,4 +15,4 @@ In some cases you will need to do a backup without doing a snapshot.
|
||||
The most common use case for this is a large VM on a small local SR where you just don't have the space to do a snapshot before backup.
|
||||
So for that you need to open the advanced settings of your backup job, and check the offline backup check box:
|
||||
|
||||

|
||||

|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/backups": "^0.1.1",
|
||||
"@xen-orchestra/backups": "^0.4.0",
|
||||
"@xen-orchestra/cron": "^1.0.6",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
|
||||
@@ -12,6 +12,7 @@ import { PassThrough } from 'stream'
|
||||
import { AssertionError } from 'assert'
|
||||
import { basename, dirname } from 'path'
|
||||
import { decorateWith } from '@vates/decorate-with'
|
||||
import { formatVmBackup } from '@xen-orchestra/backups/formatVmBackup'
|
||||
import { invalidParameters } from 'xo-common/api-errors'
|
||||
import { isValidXva } from '@xen-orchestra/backups/isValidXva'
|
||||
import { parseDuration } from '@vates/parse-duration'
|
||||
@@ -965,37 +966,13 @@ export default class BackupNg {
|
||||
await Promise.all(
|
||||
entries.map(async vmUuid => {
|
||||
// $FlowFixMe don't know what is the problem (JFT)
|
||||
const backups = await this._listVmBackups(handler, vmUuid)
|
||||
const backups = await this._listVmBackups(handler, remoteId, vmUuid)
|
||||
|
||||
if (backups.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
backupsByVm[vmUuid] = backups.map(backup => ({
|
||||
disks:
|
||||
backup.vhds === undefined
|
||||
? []
|
||||
: Object.keys(backup.vhds).map(vdiId => {
|
||||
const vdi = backup.vdis[vdiId]
|
||||
return {
|
||||
id: `${dirname(backup._filename)}/${backup.vhds[vdiId]}`,
|
||||
name: vdi.name_label,
|
||||
uuid: vdi.uuid,
|
||||
}
|
||||
}),
|
||||
|
||||
// inject an id usable by importVmBackupNg()
|
||||
id: `${remoteId}/${backup._filename}`,
|
||||
jobId: backup.jobId,
|
||||
mode: backup.mode,
|
||||
scheduleId: backup.scheduleId,
|
||||
size: backup.size,
|
||||
timestamp: backup.timestamp,
|
||||
vm: {
|
||||
name_description: backup.vm.name_description,
|
||||
name_label: backup.vm.name_label,
|
||||
},
|
||||
}))
|
||||
backupsByVm[vmUuid] = backups.map(backup => formatVmBackup(backup))
|
||||
})
|
||||
)
|
||||
} catch (error) {
|
||||
@@ -1349,7 +1326,7 @@ export default class BackupNg {
|
||||
|
||||
const oldBackups: MetadataFull[] = (getOldEntries(
|
||||
exportRetention - 1,
|
||||
await this._listVmBackups(handler, vm, _ => _.mode === 'full' && _.scheduleId === scheduleId)
|
||||
await this._listVmBackups(handler, remoteId, vm, _ => _.mode === 'full' && _.scheduleId === scheduleId)
|
||||
): any)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
@@ -1646,7 +1623,7 @@ export default class BackupNg {
|
||||
|
||||
const oldBackups: MetadataDelta[] = (getOldEntries(
|
||||
exportRetention - 1,
|
||||
await this._listVmBackups(handler, vm, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
|
||||
await this._listVmBackups(handler, remoteId, vm, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
|
||||
): any)
|
||||
|
||||
// FIXME: implement optimized multiple VHDs merging with synthetic
|
||||
@@ -1897,6 +1874,7 @@ export default class BackupNg {
|
||||
|
||||
async _listVmBackups(
|
||||
handler: RemoteHandler,
|
||||
remoteId,
|
||||
vm: Object | string,
|
||||
predicate?: Metadata => boolean
|
||||
): Promise<Metadata[]> {
|
||||
@@ -1924,6 +1902,7 @@ export default class BackupNg {
|
||||
Object.defineProperty(metadata, '_filename', {
|
||||
value: path,
|
||||
})
|
||||
metadata.id = `${remoteId}/${path}`
|
||||
backups.push(metadata)
|
||||
}
|
||||
} catch (error) {
|
||||
|
||||
323
yarn.lock
323
yarn.lock
@@ -2,7 +2,7 @@
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
"@babel/cli@^7.0.0", "@babel/cli@^7.1.5", "@babel/cli@^7.4.4", "@babel/cli@^7.7.0", "@babel/cli@^7.7.4":
|
||||
"@babel/cli@^7.0.0", "@babel/cli@^7.1.5", "@babel/cli@^7.2.3", "@babel/cli@^7.4.4", "@babel/cli@^7.7.0", "@babel/cli@^7.7.4":
|
||||
version "7.12.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/cli/-/cli-7.12.10.tgz#67a1015b1cd505bde1696196febf910c4c339a48"
|
||||
integrity sha512-+y4ZnePpvWs1fc/LhZRTHkTesbXkyBYuOB+5CyodZqrEuETXi3zOVfpAQIdgC3lXbHLTDG9dQosxR9BhvLKDLQ==
|
||||
@@ -31,7 +31,7 @@
|
||||
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.12.7.tgz#9329b4782a7d6bbd7eef57e11addf91ee3ef1e41"
|
||||
integrity sha512-YaxPMGs/XIWtYqrdEOZOCPsVWfEoriXopnsz3/i7apYPXQ3698UFhS6dVT1KN5qOsWmVgw/FOrmQgpRaZayGsw==
|
||||
|
||||
"@babel/core@^7.0.0", "@babel/core@^7.1.0", "@babel/core@^7.1.5", "@babel/core@^7.1.6", "@babel/core@^7.11.0", "@babel/core@^7.4.4", "@babel/core@^7.7.2", "@babel/core@^7.7.4", "@babel/core@^7.7.5", "@babel/core@^7.8.4":
|
||||
"@babel/core@^7.0.0", "@babel/core@^7.1.0", "@babel/core@^7.1.5", "@babel/core@^7.1.6", "@babel/core@^7.11.0", "@babel/core@^7.3.3", "@babel/core@^7.4.4", "@babel/core@^7.7.2", "@babel/core@^7.7.4", "@babel/core@^7.7.5", "@babel/core@^7.8.4":
|
||||
version "7.12.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.10.tgz#b79a2e1b9f70ed3d84bbfb6d8c4ef825f606bccd"
|
||||
integrity sha512-eTAlQKq65zHfkHZV0sIVODCPGVgoo1HdBlbSLi9CqOzuZanMv2ihzY+4paiKr1mH+XmYESMAmJ/dpZ68eN6d8w==
|
||||
@@ -277,7 +277,7 @@
|
||||
"@babel/helper-remap-async-to-generator" "^7.12.1"
|
||||
"@babel/plugin-syntax-async-generators" "^7.8.0"
|
||||
|
||||
"@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.3.4", "@babel/plugin-proposal-class-properties@^7.8.3":
|
||||
"@babel/plugin-proposal-class-properties@^7.1.0", "@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.3.4", "@babel/plugin-proposal-class-properties@^7.8.3":
|
||||
version "7.12.1"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.12.1.tgz#a082ff541f2a29a4821065b8add9346c0c16e5de"
|
||||
integrity sha512-cKp3dlQsFsEs5CWKnN7BnSHOd0EOW8EKpEjkoz1pO2E5KzIDNV9Ros1b0CnmbVgAGXJubOYVBOGCT1OmJwOI7w==
|
||||
@@ -285,7 +285,7 @@
|
||||
"@babel/helper-create-class-features-plugin" "^7.12.1"
|
||||
"@babel/helper-plugin-utils" "^7.10.4"
|
||||
|
||||
"@babel/plugin-proposal-decorators@^7.0.0", "@babel/plugin-proposal-decorators@^7.1.6", "@babel/plugin-proposal-decorators@^7.4.0", "@babel/plugin-proposal-decorators@^7.8.0", "@babel/plugin-proposal-decorators@^7.8.3":
|
||||
"@babel/plugin-proposal-decorators@^7.0.0", "@babel/plugin-proposal-decorators@^7.1.6", "@babel/plugin-proposal-decorators@^7.3.0", "@babel/plugin-proposal-decorators@^7.4.0", "@babel/plugin-proposal-decorators@^7.8.0", "@babel/plugin-proposal-decorators@^7.8.3":
|
||||
version "7.12.12"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.12.12.tgz#067a6d3d6ca86d54cf56bb183239199c20daeafe"
|
||||
integrity sha512-fhkE9lJYpw2mjHelBpM2zCbaA11aov2GJs7q4cFaXNrWx0H3bW58H9Esy2rdtYOghFBEYUDRIpvlgi+ZD+AvvQ==
|
||||
@@ -342,7 +342,7 @@
|
||||
"@babel/helper-plugin-utils" "^7.10.4"
|
||||
"@babel/plugin-syntax-logical-assignment-operators" "^7.10.4"
|
||||
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator@^7.0.0", "@babel/plugin-proposal-nullish-coalescing-operator@^7.12.1", "@babel/plugin-proposal-nullish-coalescing-operator@^7.2.0", "@babel/plugin-proposal-nullish-coalescing-operator@^7.4.3", "@babel/plugin-proposal-nullish-coalescing-operator@^7.4.4", "@babel/plugin-proposal-nullish-coalescing-operator@^7.8.0":
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator@^7.0.0", "@babel/plugin-proposal-nullish-coalescing-operator@^7.12.1", "@babel/plugin-proposal-nullish-coalescing-operator@^7.2.0", "@babel/plugin-proposal-nullish-coalescing-operator@^7.4.3", "@babel/plugin-proposal-nullish-coalescing-operator@^7.4.4", "@babel/plugin-proposal-nullish-coalescing-operator@^7.7.4", "@babel/plugin-proposal-nullish-coalescing-operator@^7.8.0":
|
||||
version "7.12.1"
|
||||
resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.12.1.tgz#3ed4fff31c015e7f3f1467f190dbe545cd7b046c"
|
||||
integrity sha512-nZY0ESiaQDI1y96+jk6VxMOaL4LPo/QDHBqL+SF3/vl6dHkTwHlOI8L4ZwuRBHgakRBw5zsVylel7QPbbGuYgg==
|
||||
@@ -829,7 +829,7 @@
|
||||
"@babel/helper-create-regexp-features-plugin" "^7.12.1"
|
||||
"@babel/helper-plugin-utils" "^7.10.4"
|
||||
|
||||
"@babel/preset-env@^7.0.0", "@babel/preset-env@^7.1.5", "@babel/preset-env@^7.1.6", "@babel/preset-env@^7.11.0", "@babel/preset-env@^7.4.4", "@babel/preset-env@^7.7.1", "@babel/preset-env@^7.7.4":
|
||||
"@babel/preset-env@^7.0.0", "@babel/preset-env@^7.1.5", "@babel/preset-env@^7.1.6", "@babel/preset-env@^7.11.0", "@babel/preset-env@^7.3.1", "@babel/preset-env@^7.4.4", "@babel/preset-env@^7.7.1", "@babel/preset-env@^7.7.4":
|
||||
version "7.12.11"
|
||||
resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.12.11.tgz#55d5f7981487365c93dbbc84507b1c7215e857f9"
|
||||
integrity sha512-j8Tb+KKIXKYlDBQyIOy4BLxzv1NUOwlHfZ74rvW+Z0Gp4/cI2IMDPBWAgWceGcE7aep9oL/0K9mlzlMGxA8yNw==
|
||||
@@ -1026,7 +1026,7 @@
|
||||
normalize-path "^2.0.1"
|
||||
through2 "^2.0.3"
|
||||
|
||||
"@iarna/toml@^2.2.1":
|
||||
"@iarna/toml@^2.2.0", "@iarna/toml@^2.2.1":
|
||||
version "2.2.5"
|
||||
resolved "https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c"
|
||||
integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==
|
||||
@@ -1366,6 +1366,14 @@
|
||||
"@types/yargs" "^15.0.0"
|
||||
chalk "^4.0.0"
|
||||
|
||||
"@marsaud/smb2@^0.15.0":
|
||||
version "0.15.0"
|
||||
resolved "https://registry.yarnpkg.com/@marsaud/smb2/-/smb2-0.15.0.tgz#151eaa0d0a5e3fc8929d7dc872c2bed397b444dd"
|
||||
integrity sha512-8y6Gvr8FUMNrRxoQ3luNJWB8y0JKu+8WO6NnT1kyIZaWMa94iIvFTmyKmu40wRrZ2W4bqh1gTJYp7E99NO4usQ==
|
||||
dependencies:
|
||||
ntlm "~0.1.1"
|
||||
readable-stream "^3.0.6"
|
||||
|
||||
"@marsaud/smb2@^0.17.2":
|
||||
version "0.17.2"
|
||||
resolved "https://registry.yarnpkg.com/@marsaud/smb2/-/smb2-0.17.2.tgz#5cc028f24f1fff7aa9012b221e36ffbf90a43367"
|
||||
@@ -2079,6 +2087,27 @@
|
||||
"@webassemblyjs/wast-parser" "1.9.0"
|
||||
"@xtuc/long" "4.2.2"
|
||||
|
||||
"@xen-orchestra/fs@0.12.0":
|
||||
version "0.12.0"
|
||||
resolved "https://registry.yarnpkg.com/@xen-orchestra/fs/-/fs-0.12.0.tgz#0b925d73d202a363c7483bc62b42bedac5708f1e"
|
||||
integrity sha512-HA0D4KD6PKDY+klzo33L4zZAdbJ9xzWnM/QsibWgpSd2GqRAz06VxpSQMxJ9OYxuIn8YzWQNOkBOsADpL/Ao1A==
|
||||
dependencies:
|
||||
"@marsaud/smb2" "^0.15.0"
|
||||
"@sindresorhus/df" "^3.1.1"
|
||||
"@xen-orchestra/async-map" "^0.0.0"
|
||||
aws-sdk "^2.686.0"
|
||||
decorator-synchronized "^0.5.0"
|
||||
execa "^4.0.2"
|
||||
fs-extra "^9.0.0"
|
||||
get-stream "^6.0.0"
|
||||
limit-concurrency-decorator "^0.4.0"
|
||||
lodash "^4.17.4"
|
||||
promise-toolbox "^0.15.0"
|
||||
readable-stream "^3.0.6"
|
||||
through2 "^4.0.2"
|
||||
tmp "^0.2.1"
|
||||
xo-remote-parser "^0.6.0"
|
||||
|
||||
"@xmpp/jid@^0.0.2":
|
||||
version "0.0.2"
|
||||
resolved "https://registry.yarnpkg.com/@xmpp/jid/-/jid-0.0.2.tgz#0d528ca9d58dafc833665564ffe62f332a3167f2"
|
||||
@@ -2151,7 +2180,7 @@ abstract-leveldown@~6.2.1, abstract-leveldown@~6.2.3:
|
||||
level-supports "~1.0.0"
|
||||
xtend "~4.0.0"
|
||||
|
||||
accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7:
|
||||
accepts@^1.3.5, accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7:
|
||||
version "1.3.7"
|
||||
resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd"
|
||||
integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==
|
||||
@@ -2457,7 +2486,7 @@ ansi_up@^4.0.3:
|
||||
resolved "https://registry.yarnpkg.com/ansi_up/-/ansi_up-4.0.4.tgz#5b8c35f0b02e4476f3f18cf89c3bf48d15d054f6"
|
||||
integrity sha512-vRxC8q6QY918MbehO869biJW4tiunJdjOhi5fpY6NLOliBQlZhOkKgABJKJqH+JZfb/WfjvjN1chLWI6tODerw==
|
||||
|
||||
any-promise@^1.0.0, any-promise@^1.3.0:
|
||||
any-promise@^1.0.0, any-promise@^1.1.0, any-promise@^1.3.0:
|
||||
version "1.3.0"
|
||||
resolved "https://registry.yarnpkg.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f"
|
||||
integrity sha1-q8av7tzqUugJzcA3au0845Y10X8=
|
||||
@@ -2486,6 +2515,18 @@ anymatch@^3.0.3, anymatch@~3.1.1:
|
||||
normalize-path "^3.0.0"
|
||||
picomatch "^2.0.4"
|
||||
|
||||
app-conf@^0.7.0:
|
||||
version "0.7.1"
|
||||
resolved "https://registry.yarnpkg.com/app-conf/-/app-conf-0.7.1.tgz#59bf3605e6d8fa19107408ae9f94c1c9984764b4"
|
||||
integrity sha512-arusY8qGB4EmPFUweuLVQde4cj0zQfMBo+D7wRk285SQPfetX9Jk+4VRL+20n5dsJr4Rm4uj2y6exm78wsgCFg==
|
||||
dependencies:
|
||||
debug "^4.1.0"
|
||||
glob "^7.1.3"
|
||||
lodash "^4.17.11"
|
||||
make-error "^1.3.5"
|
||||
promise-toolbox "^0.14.0"
|
||||
xdg-basedir "^3.0.0"
|
||||
|
||||
app-conf@^0.8.0:
|
||||
version "0.8.0"
|
||||
resolved "https://registry.yarnpkg.com/app-conf/-/app-conf-0.8.0.tgz#14c8393a68097ff03fab259e832b1be89b71dfc6"
|
||||
@@ -2498,6 +2539,19 @@ app-conf@^0.8.0:
|
||||
promise-toolbox "^0.14.0"
|
||||
xdg-basedir "^3.0.0"
|
||||
|
||||
app-conf@^0.9.0:
|
||||
version "0.9.0"
|
||||
resolved "https://registry.yarnpkg.com/app-conf/-/app-conf-0.9.0.tgz#194cbcd023a1b1fe7f86fb0ee8cb612cb81cdc13"
|
||||
integrity sha512-8Wcs9auzsfHQgR0x6GgA2V65ZVXue1mpbypoALMPXEiJkkZGIdhsKCCMQ7ab0AvbuEDVLYeCbzdCZzjS6eiRww==
|
||||
dependencies:
|
||||
chokidar "^3.5.1"
|
||||
debug "^4.1.0"
|
||||
glob "^7.1.3"
|
||||
lodash "^4.17.11"
|
||||
make-error "^1.3.5"
|
||||
promise-toolbox "^0.16.0"
|
||||
xdg-basedir "^3.0.0"
|
||||
|
||||
append-buffer@^1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/append-buffer/-/append-buffer-1.0.2.tgz#d8220cf466081525efea50614f3de6514dfa58f1"
|
||||
@@ -4496,7 +4550,7 @@ bytes@3.0.0:
|
||||
resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048"
|
||||
integrity sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=
|
||||
|
||||
bytes@3.1.0:
|
||||
bytes@3.1.0, bytes@^3.0.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6"
|
||||
integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==
|
||||
@@ -4542,6 +4596,14 @@ cache-base@^1.0.1:
|
||||
union-value "^1.0.0"
|
||||
unset-value "^1.0.0"
|
||||
|
||||
cache-content-type@^1.0.0:
|
||||
version "1.0.1"
|
||||
resolved "https://registry.yarnpkg.com/cache-content-type/-/cache-content-type-1.0.1.tgz#035cde2b08ee2129f4a8315ea8f00a00dba1453c"
|
||||
integrity sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==
|
||||
dependencies:
|
||||
mime-types "^2.1.18"
|
||||
ylru "^1.2.0"
|
||||
|
||||
cache-loader@^3.0.0:
|
||||
version "3.0.1"
|
||||
resolved "https://registry.yarnpkg.com/cache-loader/-/cache-loader-3.0.1.tgz#cee6cf4b3cdc7c610905b26bad6c2fc439c821af"
|
||||
@@ -4843,6 +4905,21 @@ chokidar@^3.4.0, chokidar@^3.4.1:
|
||||
optionalDependencies:
|
||||
fsevents "~2.3.1"
|
||||
|
||||
chokidar@^3.5.1:
|
||||
version "3.5.1"
|
||||
resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.5.1.tgz#ee9ce7bbebd2b79f49f304799d5468e31e14e68a"
|
||||
integrity sha512-9+s+Od+W0VJJzawDma/gvBNQqkTiqYTWLuZoyAsivsI4AaWTCzHG06/TMjsf1cYe9Cb97UCEhjz7HvnPk2p/tw==
|
||||
dependencies:
|
||||
anymatch "~3.1.1"
|
||||
braces "~3.0.2"
|
||||
glob-parent "~5.1.0"
|
||||
is-binary-path "~2.1.0"
|
||||
is-glob "~4.0.1"
|
||||
normalize-path "~3.0.0"
|
||||
readdirp "~3.5.0"
|
||||
optionalDependencies:
|
||||
fsevents "~2.3.1"
|
||||
|
||||
chownr@^1.1.1:
|
||||
version "1.1.4"
|
||||
resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b"
|
||||
@@ -5057,6 +5134,11 @@ code-point-at@^1.0.0:
|
||||
resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77"
|
||||
integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=
|
||||
|
||||
coffeescript@1.12.7:
|
||||
version "1.12.7"
|
||||
resolved "https://registry.yarnpkg.com/coffeescript/-/coffeescript-1.12.7.tgz#e57ee4c4867cf7f606bfc4a0f2d550c0981ddd27"
|
||||
integrity sha512-pLXHFxQMPklVoEekowk8b3erNynC+DVJzChxS/LCBBgR6/8AJkHivkm//zbowcfc7BTCAjryuhx6gPqPRfsFoA==
|
||||
|
||||
collect-v8-coverage@^1.0.0:
|
||||
version "1.0.1"
|
||||
resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.1.tgz#cc2c8e94fc18bbdffe64d6534570c8a673b27f59"
|
||||
@@ -5191,7 +5273,7 @@ commondir@^1.0.1:
|
||||
resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b"
|
||||
integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs=
|
||||
|
||||
compare-versions@^3.6.0:
|
||||
compare-versions@^3.4.0, compare-versions@^3.6.0:
|
||||
version "3.6.0"
|
||||
resolved "https://registry.yarnpkg.com/compare-versions/-/compare-versions-3.6.0.tgz#1a5689913685e5a87637b8d3ffca75514ec41d62"
|
||||
integrity sha512-W6Af2Iw1z4CB7q4uU4hv646dW9GQuBM+YpC0UvUCWSD8w90SJjp+ujJuXaEMtAXBtSqGfMPuFOVn4/+FlaqfBA==
|
||||
@@ -5211,7 +5293,7 @@ compress-commons@^4.0.2:
|
||||
normalize-path "^3.0.0"
|
||||
readable-stream "^3.6.0"
|
||||
|
||||
compressible@~2.0.16:
|
||||
compressible@^2.0.0, compressible@~2.0.16:
|
||||
version "2.0.18"
|
||||
resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba"
|
||||
integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==
|
||||
@@ -5318,7 +5400,7 @@ contains-path@^0.1.0:
|
||||
resolved "https://registry.yarnpkg.com/contains-path/-/contains-path-0.1.0.tgz#fe8cf184ff6670b6baef01a9d4861a5cbec4120a"
|
||||
integrity sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo=
|
||||
|
||||
content-disposition@0.5.3:
|
||||
content-disposition@0.5.3, content-disposition@~0.5.2:
|
||||
version "0.5.3"
|
||||
resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd"
|
||||
integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==
|
||||
@@ -5370,6 +5452,14 @@ cookie@^0.4.0:
|
||||
resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.1.tgz#afd713fe26ebd21ba95ceb61f9a8116e50a537d1"
|
||||
integrity sha512-ZwrFkGJxUR3EIoXtO+yVE69Eb7KlixbaeAWfBQB9vVsNn/o+Yw69gBWSSDK825hQNdN+wF8zELf3dFNl/kxkUA==
|
||||
|
||||
cookies@~0.8.0:
|
||||
version "0.8.0"
|
||||
resolved "https://registry.yarnpkg.com/cookies/-/cookies-0.8.0.tgz#1293ce4b391740a8406e3c9870e828c4b54f3f90"
|
||||
integrity sha512-8aPsApQfebXnuI+537McwYsDtjVxGm8gTIzQI3FDW6t5t/DAhERxtnbEPN/8RX+uZthoz4eCOgloXaE5cYyNow==
|
||||
dependencies:
|
||||
depd "~2.0.0"
|
||||
keygrip "~1.1.0"
|
||||
|
||||
copy-concurrently@^1.0.0:
|
||||
version "1.0.5"
|
||||
resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0"
|
||||
@@ -5594,6 +5684,13 @@ crypto-random-string@^2.0.0:
|
||||
resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5"
|
||||
integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==
|
||||
|
||||
cson-parser@^4.0.7:
|
||||
version "4.0.7"
|
||||
resolved "https://registry.yarnpkg.com/cson-parser/-/cson-parser-4.0.7.tgz#41cfbcb16f6481d7dbd8e80784db1d87a2bbd04d"
|
||||
integrity sha512-BSnAl0gllETWjU9/lb8MmeqhsGaRINPwhoPiBjI/TJBRvKf/24I9EVqnwvmk6R3Gt66cMRSGVktl6QicxIb72g==
|
||||
dependencies:
|
||||
coffeescript "1.12.7"
|
||||
|
||||
css-color-keywords@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/css-color-keywords/-/css-color-keywords-1.0.0.tgz#fea2616dc676b2962686b3af8dbdbe180b244e05"
|
||||
@@ -6026,7 +6123,7 @@ d3-shape@1:
|
||||
dependencies:
|
||||
d3-path "1"
|
||||
|
||||
d3-time-format@2:
|
||||
d3-time-format@2, d3-time-format@^2.2.3:
|
||||
version "2.3.0"
|
||||
resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-2.3.0.tgz#107bdc028667788a8924ba040faf1fbccd5a7850"
|
||||
integrity sha512-guv6b2H37s2Uq/GefleCDtbe0XZAuy7Wa49VGkPVPMfLL9qObgBST3lEHJBMUp8S7NdLQAGIvr2KXk8Hc98iKQ==
|
||||
@@ -6273,6 +6370,11 @@ deep-equal@^1.0.1:
|
||||
object-keys "^1.1.1"
|
||||
regexp.prototype.flags "^1.2.0"
|
||||
|
||||
deep-equal@~1.0.1:
|
||||
version "1.0.1"
|
||||
resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.0.1.tgz#f5d260292b660e084eff4cdbc9f08ad3247448b5"
|
||||
integrity sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=
|
||||
|
||||
deep-extend@^0.6.0:
|
||||
version "0.6.0"
|
||||
resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac"
|
||||
@@ -6412,7 +6514,7 @@ denque@^1.4.1:
|
||||
resolved "https://registry.yarnpkg.com/denque/-/denque-1.5.0.tgz#773de0686ff2d8ec2ff92914316a47b73b1c73de"
|
||||
integrity sha512-CYiCSgIF1p6EUByQPlGkKnP1M9g0ZV3qMIrqMqZqdwazygIA/YP2vrbcyl1h/WppKJTdl1F85cXIle+394iDAQ==
|
||||
|
||||
depd@2.0.0, depd@~2.0.0:
|
||||
depd@2.0.0, depd@^2.0.0, depd@~2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df"
|
||||
integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==
|
||||
@@ -6450,7 +6552,7 @@ des.js@^1.0.0:
|
||||
inherits "^2.0.1"
|
||||
minimalistic-assert "^1.0.0"
|
||||
|
||||
destroy@~1.0.4:
|
||||
destroy@^1.0.4, destroy@~1.0.4:
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
|
||||
integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=
|
||||
@@ -6861,7 +6963,7 @@ emojis-list@^3.0.0:
|
||||
resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-3.0.0.tgz#5570662046ad29e2e916e71aae260abdff4f6a78"
|
||||
integrity sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==
|
||||
|
||||
encodeurl@~1.0.2:
|
||||
encodeurl@^1.0.2, encodeurl@~1.0.2:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
|
||||
integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=
|
||||
@@ -6883,7 +6985,7 @@ encoding@^0.1.11:
|
||||
dependencies:
|
||||
iconv-lite "^0.6.2"
|
||||
|
||||
end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1:
|
||||
end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1, end-of-stream@^1.4.4:
|
||||
version "1.4.4"
|
||||
resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0"
|
||||
integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==
|
||||
@@ -7482,7 +7584,7 @@ execa@^2.0.1:
|
||||
signal-exit "^3.0.2"
|
||||
strip-final-newline "^2.0.0"
|
||||
|
||||
execa@^4.0.0, execa@^4.1.0:
|
||||
execa@^4.0.0, execa@^4.0.2, execa@^4.1.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/execa/-/execa-4.1.0.tgz#4e5491ad1572f2f17a77d388c6c857135b22847a"
|
||||
integrity sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==
|
||||
@@ -8162,7 +8264,7 @@ fragment-cache@^0.2.1:
|
||||
dependencies:
|
||||
map-cache "^0.2.2"
|
||||
|
||||
fresh@0.5.2:
|
||||
fresh@0.5.2, fresh@~0.5.2:
|
||||
version "0.5.2"
|
||||
resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
|
||||
integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=
|
||||
@@ -9041,7 +9143,7 @@ helmet-csp@2.10.0:
|
||||
content-security-policy-builder "2.1.0"
|
||||
dasherize "2.0.0"
|
||||
|
||||
helmet@^3.9.0:
|
||||
helmet@^3.21.1, helmet@^3.9.0:
|
||||
version "3.23.3"
|
||||
resolved "https://registry.yarnpkg.com/helmet/-/helmet-3.23.3.tgz#5ba30209c5f73ded4ab65746a3a11bedd4579ab7"
|
||||
integrity sha512-U3MeYdzPJQhtvqAVBPntVgAvNSOJyagwZwyKsFdyRa8TV3pOKVFljalPOCxbw5Wwf2kncGhmP0qHjyazIdNdSA==
|
||||
@@ -9265,6 +9367,14 @@ htmlparser2@^6.0.0:
|
||||
domutils "^2.4.4"
|
||||
entities "^2.0.0"
|
||||
|
||||
http-assert@^1.3.0:
|
||||
version "1.4.1"
|
||||
resolved "https://registry.yarnpkg.com/http-assert/-/http-assert-1.4.1.tgz#c5f725d677aa7e873ef736199b89686cceb37878"
|
||||
integrity sha512-rdw7q6GTlibqVVbXr0CKelfV5iY8G2HqEUkhSk297BMbSpSL8crXC+9rjKoMcZZEsksX30le6f/4ul4E28gegw==
|
||||
dependencies:
|
||||
deep-equal "~1.0.1"
|
||||
http-errors "~1.7.2"
|
||||
|
||||
http-cache-semantics@^4.0.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390"
|
||||
@@ -9297,6 +9407,17 @@ http-errors@1.7.3, http-errors@~1.7.2:
|
||||
statuses ">= 1.5.0 < 2"
|
||||
toidentifier "1.0.0"
|
||||
|
||||
http-errors@^1.3.1, http-errors@^1.6.3, http-errors@~1.8.0:
|
||||
version "1.8.0"
|
||||
resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.0.tgz#75d1bbe497e1044f51e4ee9e704a62f28d336507"
|
||||
integrity sha512-4I8r0C5JDhT5VkvI47QktDW75rNlGVsUf/8hzjCC/wkWI/jdTRmBb9aI7erSG82r1bjKY3F6k28WnsVxB1C73A==
|
||||
dependencies:
|
||||
depd "~1.1.2"
|
||||
inherits "2.0.4"
|
||||
setprototypeof "1.2.0"
|
||||
statuses ">= 1.5.0 < 2"
|
||||
toidentifier "1.0.0"
|
||||
|
||||
http-errors@~1.3.1:
|
||||
version "1.3.1"
|
||||
resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.3.1.tgz#197e22cdebd4198585e8694ef6786197b91ed942"
|
||||
@@ -9315,17 +9436,6 @@ http-errors@~1.6.2:
|
||||
setprototypeof "1.1.0"
|
||||
statuses ">= 1.4.0 < 2"
|
||||
|
||||
http-errors@~1.8.0:
|
||||
version "1.8.0"
|
||||
resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.0.tgz#75d1bbe497e1044f51e4ee9e704a62f28d336507"
|
||||
integrity sha512-4I8r0C5JDhT5VkvI47QktDW75rNlGVsUf/8hzjCC/wkWI/jdTRmBb9aI7erSG82r1bjKY3F6k28WnsVxB1C73A==
|
||||
dependencies:
|
||||
depd "~1.1.2"
|
||||
inherits "2.0.4"
|
||||
setprototypeof "1.2.0"
|
||||
statuses ">= 1.5.0 < 2"
|
||||
toidentifier "1.0.0"
|
||||
|
||||
http-parser-js@>=0.5.1:
|
||||
version "0.5.3"
|
||||
resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.3.tgz#01d2709c79d41698bb01d4decc5e9da4e4a033d9"
|
||||
@@ -9990,6 +10100,11 @@ is-generator-fn@^2.0.0:
|
||||
resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118"
|
||||
integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==
|
||||
|
||||
is-generator-function@^1.0.7:
|
||||
version "1.0.8"
|
||||
resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.8.tgz#dfb5c2b120e02b0a8d9d2c6806cd5621aa922f7b"
|
||||
integrity sha512-2Omr/twNtufVZFr1GhxjOMFPAj2sjc/dKaIqBhvo4qciXfJmITGH6ZGd8eZYNHza8t1y0e01AuqRhJwfWp26WQ==
|
||||
|
||||
is-glob@^2.0.0, is-glob@^2.0.1:
|
||||
version "2.0.1"
|
||||
resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-2.0.1.tgz#d096f926a3ded5600f3fdfd91198cb0888c2d863"
|
||||
@@ -11433,6 +11548,13 @@ keycode@^2.1.0:
|
||||
resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.2.0.tgz#3d0af56dc7b8b8e5cba8d0a97f107204eec22b04"
|
||||
integrity sha1-PQr1bce4uOXLqNCpfxByBO7CKwQ=
|
||||
|
||||
keygrip@~1.1.0:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/keygrip/-/keygrip-1.1.0.tgz#871b1681d5e159c62a445b0c74b615e0917e7226"
|
||||
integrity sha512-iYSchDJ+liQ8iwbSI2QqsQOvqv58eJCEanyJPJi+Khyu8smkcKSFUCbPwzFcL7YVtZ6eONjqRX/38caJ7QjRAQ==
|
||||
dependencies:
|
||||
tsscmp "1.0.6"
|
||||
|
||||
keyv@^3.0.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9"
|
||||
@@ -11484,6 +11606,89 @@ kleur@^3.0.3:
|
||||
resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e"
|
||||
integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==
|
||||
|
||||
koa-compose@^3.0.0:
|
||||
version "3.2.1"
|
||||
resolved "https://registry.yarnpkg.com/koa-compose/-/koa-compose-3.2.1.tgz#a85ccb40b7d986d8e5a345b3a1ace8eabcf54de7"
|
||||
integrity sha1-qFzLQLfZhtjlo0Wzoazo6rz1Tec=
|
||||
dependencies:
|
||||
any-promise "^1.1.0"
|
||||
|
||||
koa-compose@^4.1.0:
|
||||
version "4.1.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-compose/-/koa-compose-4.1.0.tgz#507306b9371901db41121c812e923d0d67d3e877"
|
||||
integrity sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==
|
||||
|
||||
koa-compress@^3.0.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-compress/-/koa-compress-3.1.0.tgz#00fb0af695dc4661c6de261a18da669626ea3ca1"
|
||||
integrity sha512-0m24/yS/GbhWI+g9FqtvStY+yJwTObwoxOvPok6itVjRen7PBWkjsJ8pre76m+99YybXLKhOJ62mJ268qyBFMQ==
|
||||
dependencies:
|
||||
bytes "^3.0.0"
|
||||
compressible "^2.0.0"
|
||||
koa-is-json "^1.0.0"
|
||||
statuses "^1.0.0"
|
||||
|
||||
koa-convert@^1.2.0:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-convert/-/koa-convert-1.2.0.tgz#da40875df49de0539098d1700b50820cebcd21d0"
|
||||
integrity sha1-2kCHXfSd4FOQmNFwC1CCDOvNIdA=
|
||||
dependencies:
|
||||
co "^4.6.0"
|
||||
koa-compose "^3.0.0"
|
||||
|
||||
koa-helmet@^5.1.0:
|
||||
version "5.2.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-helmet/-/koa-helmet-5.2.0.tgz#6529f64dd4539261a9bb0a56e201e4976f0200f0"
|
||||
integrity sha512-Q4h4CnpcEo3NuIvD1bBOakkfusPiOvJc/NlOI9M+pG3zeNm2OqFLMbIzCPsvGBz++37KMregUBXZvQiNPDD37w==
|
||||
dependencies:
|
||||
helmet "^3.21.1"
|
||||
|
||||
koa-is-json@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-is-json/-/koa-is-json-1.0.0.tgz#273c07edcdcb8df6a2c1ab7d59ee76491451ec14"
|
||||
integrity sha1-JzwH7c3Ljfaiwat9We52SRRR7BQ=
|
||||
|
||||
koa-router@^7.4.0:
|
||||
version "7.4.0"
|
||||
resolved "https://registry.yarnpkg.com/koa-router/-/koa-router-7.4.0.tgz#aee1f7adc02d5cb31d7d67465c9eacc825e8c5e0"
|
||||
integrity sha512-IWhaDXeAnfDBEpWS6hkGdZ1ablgr6Q6pGdXCyK38RbzuH4LkUOpPqPw+3f8l8aTDrQmBQ7xJc0bs2yV4dzcO+g==
|
||||
dependencies:
|
||||
debug "^3.1.0"
|
||||
http-errors "^1.3.1"
|
||||
koa-compose "^3.0.0"
|
||||
methods "^1.0.1"
|
||||
path-to-regexp "^1.1.1"
|
||||
urijs "^1.19.0"
|
||||
|
||||
koa@^2.5.1:
|
||||
version "2.13.1"
|
||||
resolved "https://registry.yarnpkg.com/koa/-/koa-2.13.1.tgz#6275172875b27bcfe1d454356a5b6b9f5a9b1051"
|
||||
integrity sha512-Lb2Dloc72auj5vK4X4qqL7B5jyDPQaZucc9sR/71byg7ryoD1NCaCm63CShk9ID9quQvDEi1bGR/iGjCG7As3w==
|
||||
dependencies:
|
||||
accepts "^1.3.5"
|
||||
cache-content-type "^1.0.0"
|
||||
content-disposition "~0.5.2"
|
||||
content-type "^1.0.4"
|
||||
cookies "~0.8.0"
|
||||
debug "~3.1.0"
|
||||
delegates "^1.0.0"
|
||||
depd "^2.0.0"
|
||||
destroy "^1.0.4"
|
||||
encodeurl "^1.0.2"
|
||||
escape-html "^1.0.3"
|
||||
fresh "~0.5.2"
|
||||
http-assert "^1.3.0"
|
||||
http-errors "^1.6.3"
|
||||
is-generator-function "^1.0.7"
|
||||
koa-compose "^4.1.0"
|
||||
koa-convert "^1.2.0"
|
||||
on-finished "^2.3.0"
|
||||
only "~0.0.2"
|
||||
parseurl "^1.3.2"
|
||||
statuses "^1.5.0"
|
||||
type-is "^1.6.16"
|
||||
vary "^1.1.2"
|
||||
|
||||
l33teral@^3.0.3:
|
||||
version "3.0.3"
|
||||
resolved "https://registry.yarnpkg.com/l33teral/-/l33teral-3.0.3.tgz#9a1dc526fbdff5c4b0f348a7e35fba66f75dea07"
|
||||
@@ -12434,7 +12639,7 @@ merge2@^1.2.3, merge2@^1.3.0:
|
||||
resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
|
||||
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
|
||||
|
||||
methods@~1.1.2:
|
||||
methods@^1.0.1, methods@~1.1.2:
|
||||
version "1.1.2"
|
||||
resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee"
|
||||
integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=
|
||||
@@ -12498,7 +12703,7 @@ mime-db@1.45.0, "mime-db@>= 1.43.0 < 2":
|
||||
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.45.0.tgz#cceeda21ccd7c3a745eba2decd55d4b73e7879ea"
|
||||
integrity sha512-CkqLUxUk15hofLoLyljJSrukZi8mAtgd+yE5uO4tqRZsdsAJKv0O+rFMhVDRJgozy+yG6md5KwuXhD4ocIoP+w==
|
||||
|
||||
mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.19, mime-types@~2.1.24:
|
||||
mime-types@^2.1.12, mime-types@^2.1.18, mime-types@~2.1.17, mime-types@~2.1.19, mime-types@~2.1.24:
|
||||
version "2.1.28"
|
||||
resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.28.tgz#1160c4757eab2c5363888e005273ecf79d2a0ecd"
|
||||
integrity sha512-0TO2yJ5YHYr7M2zzT7gDU1tbwHxEUWBCLt0lscSNpcdAfFyJOVEpRYNS7EXVcTLNj/25QO8gulHC5JtTzSE2UQ==
|
||||
@@ -13134,6 +13339,11 @@ node-xmpp-tls-connect@^1.0.1:
|
||||
resolved "https://registry.yarnpkg.com/node-xmpp-tls-connect/-/node-xmpp-tls-connect-1.0.1.tgz#91ace43ac26b138861b2be478df9df19d61dc5c3"
|
||||
integrity sha1-kazkOsJrE4hhsr5HjfnfGdYdxcM=
|
||||
|
||||
node-zone@^0.4.0:
|
||||
version "0.4.0"
|
||||
resolved "https://registry.yarnpkg.com/node-zone/-/node-zone-0.4.0.tgz#b058401a2e7c4bd34cb8cda8ee9d61360f620711"
|
||||
integrity sha512-JzQcDNvCdWF1wmHAvNN2U63+8K72u4hN40axkrpRWE6Yvhkuf2i72SMzWUHiP3rxdm2CbgaaOw8N5Gw2qN5pbA==
|
||||
|
||||
nodemailer-markdown@^1.0.1:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/nodemailer-markdown/-/nodemailer-markdown-1.0.3.tgz#13b1f9bcbcce3ff5d5c22a32bf097583e5f194bd"
|
||||
@@ -13481,7 +13691,7 @@ obuf@^1.0.0, obuf@^1.1.2:
|
||||
resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e"
|
||||
integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==
|
||||
|
||||
on-finished@~2.3.0:
|
||||
on-finished@^2.3.0, on-finished@~2.3.0:
|
||||
version "2.3.0"
|
||||
resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947"
|
||||
integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=
|
||||
@@ -13507,6 +13717,11 @@ onetime@^5.1.0, onetime@^5.1.2:
|
||||
dependencies:
|
||||
mimic-fn "^2.1.0"
|
||||
|
||||
only@~0.0.2:
|
||||
version "0.0.2"
|
||||
resolved "https://registry.yarnpkg.com/only/-/only-0.0.2.tgz#2afde84d03e50b9a8edc444e30610a70295edfb4"
|
||||
integrity sha1-Kv3oTQPlC5qO3EROMGEKcCle37Q=
|
||||
|
||||
opencollective-postinstall@^2.0.2, opencollective-postinstall@^2.0.3:
|
||||
version "2.0.3"
|
||||
resolved "https://registry.yarnpkg.com/opencollective-postinstall/-/opencollective-postinstall-2.0.3.tgz#7a0fff978f6dbfa4d006238fbac98ed4198c3259"
|
||||
@@ -13867,7 +14082,7 @@ parse-node-version@^1.0.0:
|
||||
resolved "https://registry.yarnpkg.com/parse-node-version/-/parse-node-version-1.0.1.tgz#e2b5dbede00e7fa9bc363607f53327e8b073189b"
|
||||
integrity sha512-3YHlOa/JgH6Mnpr05jP9eDG254US9ek25LyIxZlDItp2iJtwyaXQb57lBYLdT3MowkUFYEV2XXNAYIPlESvJlA==
|
||||
|
||||
parse-pairs@^1.0.0:
|
||||
parse-pairs@^1.0.0, parse-pairs@^1.1.0:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/parse-pairs/-/parse-pairs-1.1.0.tgz#1974ba9588caf29b8ab8a1b855f3c75558f5176e"
|
||||
integrity sha512-eqERRqqrwe45MkmHpl9qMr44kEhfcMcowbu1AJdJU1RBa6YJ4PlGFKmcJ00YaeBMYDUtGGnFoe5OfYaVwb3xXQ==
|
||||
@@ -13899,7 +14114,7 @@ parse5@^6.0.0, parse5@^6.0.1:
|
||||
resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b"
|
||||
integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==
|
||||
|
||||
parseurl@~1.3.0, parseurl@~1.3.2, parseurl@~1.3.3:
|
||||
parseurl@^1.3.2, parseurl@~1.3.0, parseurl@~1.3.2, parseurl@~1.3.3:
|
||||
version "1.3.3"
|
||||
resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4"
|
||||
integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==
|
||||
@@ -14049,6 +14264,13 @@ path-to-regexp@0.1.7:
|
||||
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c"
|
||||
integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=
|
||||
|
||||
path-to-regexp@^1.1.1:
|
||||
version "1.8.0"
|
||||
resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.8.0.tgz#887b3ba9d84393e87a0a0b9f4cb756198b53548a"
|
||||
integrity sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==
|
||||
dependencies:
|
||||
isarray "0.0.1"
|
||||
|
||||
path-type@^1.0.0:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/path-type/-/path-type-1.1.0.tgz#59c44f7ee491da704da415da5a4070ba4f8fe441"
|
||||
@@ -14730,7 +14952,7 @@ promise-toolbox@^0.14.0:
|
||||
dependencies:
|
||||
make-error "^1.3.2"
|
||||
|
||||
promise-toolbox@^0.15.0:
|
||||
promise-toolbox@^0.15.0, promise-toolbox@^0.15.1:
|
||||
version "0.15.1"
|
||||
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.15.1.tgz#4027935c5ffad7fdb773f84e3680aebe1dc3a4a9"
|
||||
integrity sha512-72IUYpLNVCJRsMlwqD7s5aVWWulpqwrVm2tNmTS/Citgdz/Pdx6uMyPmqXkUTgP1qJCJNOtMd8zwKTpM1YuD8Q==
|
||||
@@ -15096,7 +15318,7 @@ pumpify@^1.3.3, pumpify@^1.3.5:
|
||||
inherits "^2.0.3"
|
||||
pump "^2.0.0"
|
||||
|
||||
pumpify@^2.0.0:
|
||||
pumpify@^2.0.0, pumpify@^2.0.1:
|
||||
version "2.0.1"
|
||||
resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-2.0.1.tgz#abfc7b5a621307c728b551decbbefb51f0e4aa1e"
|
||||
integrity sha512-m7KOje7jZxrmutanlkS1daj1dS6z6BgslzOXmcSEpIlCxM3VJH7lG5QLeck/6hgF6F4crFf01UtQmNsJfweTAw==
|
||||
@@ -17020,7 +17242,7 @@ static-extend@^0.1.1:
|
||||
define-property "^0.2.5"
|
||||
object-copy "^0.1.0"
|
||||
|
||||
statuses@1, "statuses@>= 1.4.0 < 2", "statuses@>= 1.5.0 < 2", statuses@~1.5.0:
|
||||
statuses@1, "statuses@>= 1.4.0 < 2", "statuses@>= 1.5.0 < 2", statuses@^1.0.0, statuses@^1.5.0, statuses@~1.5.0:
|
||||
version "1.5.0"
|
||||
resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c"
|
||||
integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=
|
||||
@@ -17044,7 +17266,7 @@ stealthy-require@^1.1.1:
|
||||
resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b"
|
||||
integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks=
|
||||
|
||||
stoppable@^1.0.5:
|
||||
stoppable@^1.0.5, stoppable@^1.0.6:
|
||||
version "1.1.0"
|
||||
resolved "https://registry.yarnpkg.com/stoppable/-/stoppable-1.1.0.tgz#32da568e83ea488b08e4d7ea2c3bcc9d75015d5b"
|
||||
integrity sha512-KXDYZ9dszj6bzvnEMRYvxgeTHU74QBFL54XKtP3nyMuJ81CFYtABZ3bAzL2EdFUaEwJOBOgENyFj3R7oTzDyyw==
|
||||
@@ -18009,6 +18231,11 @@ tslint@^5.9.1:
|
||||
tslib "^1.8.0"
|
||||
tsutils "^2.29.0"
|
||||
|
||||
tsscmp@1.0.6:
|
||||
version "1.0.6"
|
||||
resolved "https://registry.yarnpkg.com/tsscmp/-/tsscmp-1.0.6.tgz#85b99583ac3589ec4bfef825b5000aa911d605eb"
|
||||
integrity sha512-LxhtAkPDTkVCMQjt2h6eBVY28KCjikZqZfMcC15YBeNjkgUpdCfBu5HoiOTDu86v6smE8yOjyEktJ8hlbANHQA==
|
||||
|
||||
tsutils@^2.29.0:
|
||||
version "2.29.0"
|
||||
resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-2.29.0.tgz#32b488501467acbedd4b85498673a0812aca0b99"
|
||||
@@ -18079,7 +18306,7 @@ type-fest@^0.8.1:
|
||||
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d"
|
||||
integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==
|
||||
|
||||
type-is@~1.6.10, type-is@~1.6.17, type-is@~1.6.18:
|
||||
type-is@^1.6.16, type-is@~1.6.10, type-is@~1.6.17, type-is@~1.6.18:
|
||||
version "1.6.18"
|
||||
resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131"
|
||||
integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==
|
||||
@@ -18388,6 +18615,11 @@ uri-js@^4.2.2:
|
||||
dependencies:
|
||||
punycode "^2.1.0"
|
||||
|
||||
urijs@^1.19.0:
|
||||
version "1.19.5"
|
||||
resolved "https://registry.yarnpkg.com/urijs/-/urijs-1.19.5.tgz#119683ab4b2fb0bd637e5ea6dd9117bcac68d3e4"
|
||||
integrity sha512-48z9VGWwdCV5KfizHsE05DWS5fhK6gFlx5MjO7xu0Krc5FGPWzjlXEVV0nPMrdVuP7xmMHiPZ2HoYZwKOFTZOg==
|
||||
|
||||
urix@^0.1.0:
|
||||
version "0.1.0"
|
||||
resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72"
|
||||
@@ -18562,7 +18794,7 @@ varint@^5.0.0, varint@~5.0.0:
|
||||
resolved "https://registry.yarnpkg.com/varint/-/varint-5.0.2.tgz#5b47f8a947eb668b848e034dcfa87d0ff8a7f7a4"
|
||||
integrity sha512-lKxKYG6H03yCZUpAGOPOsMcGxd1RHCu1iKvEHYDPmTyq2HueGhD73ssNBqqQWfvYs04G9iUFRvmAVLW20Jw6ow==
|
||||
|
||||
vary@~1.1.2:
|
||||
vary@^1.1.2, vary@~1.1.2:
|
||||
version "1.1.2"
|
||||
resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
|
||||
integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=
|
||||
@@ -19532,6 +19764,11 @@ yazl@^2.4.3:
|
||||
dependencies:
|
||||
buffer-crc32 "~0.2.3"
|
||||
|
||||
ylru@^1.2.0:
|
||||
version "1.2.1"
|
||||
resolved "https://registry.yarnpkg.com/ylru/-/ylru-1.2.1.tgz#f576b63341547989c1de7ba288760923b27fe84f"
|
||||
integrity sha512-faQrqNMzcPCHGVC2aaOINk13K+aaBDUPjGWl0teOXywElLjyVAB6Oe2jj62jHYtwsU49jXhScYbvPENK+6zAvQ==
|
||||
|
||||
yocto-queue@^0.1.0:
|
||||
version "0.1.0"
|
||||
resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b"
|
||||
|
||||
Reference in New Issue
Block a user