Compare commits

...

44 Commits

Author SHA1 Message Date
Thierry Goettelmann
135dda5290 XO E2E testing 2022-05-25 10:37:43 +02:00
Julien Fontanet
5b7228ed69 feat(xo-server/createAuthenticationToken): throw if expiresIn is too high 2022-05-23 18:24:51 +02:00
Julien Fontanet
b02bf90c8a feat(emit-async): pass listener, event and emitter to error handler 2022-05-23 11:31:51 +02:00
Julien Fontanet
7d3546734e feat(mixins/Config#watch{,Duration}): pass previous value and path to cb 2022-05-23 09:39:48 +02:00
Julien Fontanet
030013eb5b chore(backups/merge-worker/cli): fix lint error 2022-05-23 09:39:48 +02:00
Julien Fontanet
da181345a6 feat(xo-server/store): check access to leveldb dir
Avoid getting stuck on an inaccessible leveldb directory.
2022-05-23 09:39:48 +02:00
Julien Fontanet
30874b2206 chore(xo-cli): convert to ESM 2022-05-19 10:41:39 +02:00
Thierry Goettelmann
2ed6b2dc87 feat(xo-web): ability to configure a default filter for Home/Storage (#6237)
Fixes #6236
2022-05-19 09:36:11 +02:00
Julien Fontanet
41532f35d1 fix(xo-server/BackupNg#_checkAuthorizations): don't fail on missing backup.vm config section
Fixes the last part of #6243
2022-05-18 19:01:06 +02:00
Julien Fontanet
7a198a44cd chore(xo-server/BackupNg#checkAuthorization): make private
It is not used elsewhere and it makes no sense to expose a method with such a generic name on the app instance.
2022-05-18 19:01:06 +02:00
Julien Fontanet
77d615d15b chore(backups/Backup): pass baseSettings to VmBackup
Fix parts of #6243
2022-05-18 19:01:06 +02:00
Julien Fontanet
c7bc397c85 chore(backups/Backup): group settings generation in constructor 2022-05-18 19:01:06 +02:00
Julien Fontanet
38388cc297 chore(backups/VmBackup): remove unused remotes param 2022-05-18 19:01:06 +02:00
Julien Fontanet
a7b17b2b8c chore(backups/Backup): assign this.run in constructor depending of type 2022-05-18 19:01:06 +02:00
Florent BEAUCHAMP
d93afc4648 fix(xo-web/remotes): form not saving HTTPS and allow unauthorized during S3 creation (#6219) 2022-05-18 12:07:38 +02:00
Julien Fontanet
24449e41bb docs(backups): run description object 2022-05-18 11:22:30 +02:00
Julien Fontanet
df6f3ed165 chore(backups,xo-server,proxy): centralize default settings 2022-05-18 11:14:56 +02:00
Julien Fontanet
ca5914dbfb docs(backups): outline writer API 2022-05-17 14:07:01 +02:00
Julien Fontanet
3c3a1f8981 chore: update dev deps 2022-05-17 11:27:06 +02:00
Florent BEAUCHAMP
01810f35b2 fix(S3#_list): handle remote without base directory (#6218)
Related to zammad#6740
2022-05-17 10:56:45 +02:00
Florent BEAUCHAMP
5db4083414 feat(backups): add settings to force snapshotting during VM backup (#6221)
Fixes zammad#6735
2022-05-17 10:54:28 +02:00
Florent BEAUCHAMP
8bf3a747f0 feat(backups): add cache for backup metadata (#6220)
Fixes zammad#5747

Listing all the backups can be slow. To speed it up, the metadata of all the backups of each VM is cached in a single gzipped JSON file.

The cache is invalidated when a backup is deleted or created.
2022-05-17 10:43:00 +02:00
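A minimal sketch of the caching scheme described in this commit, assuming a generic remote `handler` exposing `readFile`/`writeFile` and the usual `xo-vm-backups/<VM UUID>/` layout (the real logic lives in `RemoteAdapter`, see the diff further down, which uses the same `cache.json.gz` file):

```js
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const zlib = require('zlib')

// Read the per-VM cache; return undefined when it is missing or unreadable
// so the caller can rebuild it from the individual metadata files.
async function readVmBackupsCache(handler, vmUuid) {
  const path = `xo-vm-backups/${vmUuid}/cache.json.gz`
  try {
    const gzipped = await handler.readFile(path)
    return JSON.parse(await fromCallback(zlib.gunzip, gzipped))
  } catch (error) {
    if (error.code !== 'ENOENT') {
      console.warn('cache unreadable', { vmUuid, error })
    }
  }
}

// Rewrite the cache after a listing; deleting the file is enough to
// invalidate it when a backup is created or removed.
async function writeVmBackupsCache(handler, vmUuid, backups) {
  const gzipped = await fromCallback(zlib.gzip, JSON.stringify(backups))
  await handler.writeFile(`xo-vm-backups/${vmUuid}/cache.json.gz`, gzipped, { flags: 'w' })
}

module.exports = { readVmBackupsCache, writeVmBackupsCache }
```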
Julien Fontanet
f0e817a8d9 chore: format with Prettier 2022-05-17 10:35:16 +02:00
Florent Beauchamp
b181c59698 fix(fs/S3#_createReadStream): avoid race condition when checking file exist 2022-05-17 10:34:08 +02:00
Julien Fontanet
cfa094f208 chore(xo-proxy-cli): convert to ESM 2022-05-16 17:27:00 +02:00
Julien Fontanet
9ee5a8d089 fix(xo-proxy-cli): don't try to load xo-proxy vendor config
It's not necessary, not relevant (because it does not belong to this app) and the path was incorrect anyway.
2022-05-16 17:27:00 +02:00
Denis Fondras
819127da57 docs(installation): add OpenBSD instruction (#5762) 2022-05-16 12:05:52 +02:00
Julien Fontanet
6e9659a797 feat: release 5.70.2 2022-05-16 10:22:53 +02:00
Julien Fontanet
07bd9cadd4 fix(xo-server/vm.create): typo during VIF creation
Introduced by ecae554a7

Fixes https://xcp-ng.org/forum/post/49143
2022-05-15 14:32:59 +02:00
Florent BEAUCHAMP
a1bcd35e26 feat(backups/cleanVm): can fully merge VHD chains (#6184)
Before this change, `cleanVm` only knew how to merge a single VHD; now, with the help of `VhdSynthetic`, it can merge the whole chain in a single pass.
2022-05-13 16:46:22 +02:00
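Per the `_cleanVm.js` change shown later in this diff, `mergeVhd` from `vhd-lib` now accepts an array of child paths and folds them into the parent in one pass; a hedged sketch of the call shape (file names are hypothetical):

```js
'use strict'

const { mergeVhd } = require('vhd-lib')

// Fold every child of a chain into its full ancestor in a single merge.
// `handler` is an @xen-orchestra/fs handler; the paths are hypothetical.
async function mergeWholeChain(handler) {
  const mergedSize = await mergeVhd(
    handler,
    'ancestor.vhd', // the full VHD that receives the data
    handler,
    ['child2.vhd', 'child1.vhd'], // the children to fold in
    {
      onProgress({ done, total }) {
        console.log(`merging: ${done}/${total}`)
      },
    }
  )
  return mergedSize
}

module.exports = mergeWholeChain
```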
Florent BEAUCHAMP
1a741e18fd fix(vhd-lib/VhdDirectory#writeChunkFilters): correctly overwrite chunk-filter.json (#6235) 2022-05-13 13:38:02 +02:00
Olivier Lambert
2e133dd0fb feat: create SECURITY.md (#6176)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-05-13 10:58:55 +02:00
Julien Fontanet
ecae554a78 chore(xo-server): use @xen-orchestra/xapi/VIF_create 2022-05-12 15:36:19 +02:00
Julien Fontanet
4bed50b4ed chore(xo-server): remove unused {export,import}DeltaVm functions 2022-05-12 15:10:48 +02:00
Julien Fontanet
c92b371d9e feat(xo-server): 5.93.1 2022-05-12 11:50:48 +02:00
Julien Fontanet
35e6bb30db feat(@xen-orchestra/proxy): 0.22.1 2022-05-12 11:49:32 +02:00
Julien Fontanet
1aaa123f47 feat(@xen-orchestra/mixins): 0.4.0 2022-05-12 11:47:59 +02:00
Julien Fontanet
a8c507a1df feat(@xen-orchestra/backups): 0.23.0 2022-05-12 11:43:41 +02:00
Julien Fontanet
581e3c358f feat(@xen-orchestra/xapi): 1.0.0 2022-05-12 11:41:09 +02:00
Julien Fontanet
e4f1b8f2e0 fix(xo-server/installPatches): fix pool wide detection (#6231)
Introduced by 3f1c41a4f

Fixes zammad#6819 zammad#6781 zammad#6827

In #6186 the behavior was changed to always pass hosts, which broke the pool wide detection.
2022-05-12 10:56:18 +02:00
Julien Fontanet
29e8a7fd7e docs(xo-server/REST API): Set-Cookie is not implemented 2022-05-10 15:31:40 +02:00
Julien Fontanet
4af289c492 feat(docs/architecture): update xo-cli usage 2022-05-10 11:38:19 +02:00
Julien Fontanet
cd95793054 chore(mixins): convert to ESM 2022-05-09 14:46:25 +02:00
Julien Fontanet
ab71578cf2 chore(xapi): major version
When using major version zero, every increase of the minor version number is breaking.

This meant that each new version of `xapi` also required a new release of `@xen-orchestra/backups`; using a true major version fixes that.
2022-05-09 10:00:36 +02:00
75 changed files with 2681 additions and 1998 deletions

View File

@@ -7,7 +7,7 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.22.0",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",

View File

@@ -24,6 +24,32 @@ const getAdaptersByRemote = adapters => {
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
const DEFAULT_SETTINGS = {
reportWhen: 'failure',
}
const DEFAULT_VM_SETTINGS = {
bypassVdiChainsCheck: false,
checkpointSnapshot: false,
concurrency: 2,
copyRetention: 0,
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
unconditionalSnapshot: false,
vmTimeout: 0,
}
const DEFAULT_METADATA_SETTINGS = {
retentionPoolMetadata: 0,
retentionXoMetadata: 0,
}
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
@@ -42,17 +68,22 @@ exports.Backup = class Backup {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
})
}
run() {
const type = this._job.type
const { type } = job
const baseSettings = { ...DEFAULT_SETTINGS }
if (type === 'backup') {
return this._runVmBackup()
Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
this.run = this._runVmBackup
} else if (type === 'metadataBackup') {
return this._runMetadataBackup()
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
this.run = this._runMetadataBackup
} else {
throw new Error(`No runner for the backup type ${type}`)
}
Object.assign(baseSettings, job.settings[''])
this._baseSettings = baseSettings
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
}
async _runMetadataBackup() {
@@ -64,13 +95,6 @@ exports.Backup = class Backup {
}
const config = this._config
const settings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...job.settings[''],
...job.settings[schedule.id],
}
const poolIds = extractIdsFromSimplePattern(job.pools)
const isEmptyPools = poolIds.length === 0
const isXoMetadata = job.xoMetadata !== undefined
@@ -78,6 +102,8 @@ exports.Backup = class Backup {
throw new Error('no metadata mode found')
}
const settings = this._settings
const { retentionPoolMetadata, retentionXoMetadata } = settings
if (
@@ -189,14 +215,6 @@ exports.Backup = class Backup {
const schedule = this._schedule
const config = this._config
const { settings } = job
const scheduleSettings = {
...config.defaultSettings,
...config.vm.defaultSettings,
...settings[''],
...settings[schedule.id],
}
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -231,7 +249,9 @@ exports.Backup = class Backup {
// remove srs that failed (already handled)
srs = srs.filter(_ => _ !== undefined)
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
const settings = this._settings
if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
return
}
@@ -241,23 +261,26 @@ exports.Backup = class Backup {
remoteAdapters = getAdaptersByRemote(remoteAdapters)
const allSettings = this._job.settings
const baseSettings = this._baseSettings
const handleVm = vmUuid =>
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
Disposable.use(this._getRecord('VM', vmUuid), vm =>
new VmBackup({
baseSettings,
config,
getSnapshotNameLabel,
job,
// remotes,
remoteAdapters,
schedule,
settings: { ...scheduleSettings, ...settings[vmUuid] },
settings: { ...settings, ...allSettings[vm.uuid] },
srs,
vm,
}).run()
)
)
const { concurrency } = scheduleSettings
const { concurrency } = settings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
}
)
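Taken together, these hunks fix a single precedence order for resolving settings; a sketch of the layering (the helper name and inputs are illustrative, default values taken from the constants above):

```js
'use strict'

// Lowest to highest priority, as assembled when the Backup is constructed
// and then completed per schedule and per VM.
const DEFAULT_SETTINGS = { reportWhen: 'failure' }
const DEFAULT_VM_SETTINGS = { concurrency: 2, snapshotRetention: 0 } // see the full list above

function resolveVmSettings({ config, job, scheduleId, vmUuid }) {
  const baseSettings = {
    ...DEFAULT_SETTINGS,
    ...DEFAULT_VM_SETTINGS,
    ...config.defaultSettings,
    ...config.vm?.defaultSettings,
    ...job.settings[''], // job-wide settings
  }
  return {
    ...baseSettings,
    ...job.settings[scheduleId], // per-schedule overrides
    ...job.settings[vmUuid], // per-VM overrides win
  }
}

module.exports = resolveVmSettings
```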

View File

@@ -1,6 +1,7 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
@@ -9,7 +10,7 @@ const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
@@ -17,6 +18,7 @@ const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
@@ -78,6 +80,7 @@ class RemoteAdapter {
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
}
get handler() {
@@ -261,7 +264,8 @@ class RemoteAdapter {
}
async deleteVmBackups(files) {
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
const metadatas = await asyncMap(files, file => this.readVmBackupMetadata(file))
const { delta, full, ...others } = groupBy(metadatas, 'mode')
const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
@@ -278,6 +282,9 @@ class RemoteAdapter {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
}
#getCompressionType() {
@@ -448,34 +455,94 @@ class RemoteAdapter {
return backupsByPool
}
async listVmBackups(vmUuid, predicate) {
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
}
async #getCachabledDataListVmBackups(dir) {
const handler = this._handler
const backups = []
const backups = {}
try {
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
const files = await handler.list(dir, {
filter: isMetadataFile,
prependDir: true,
})
await asyncMap(files, async file => {
try {
const metadata = await this.readVmBackupMetadata(file)
if (predicate === undefined || predicate(metadata)) {
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups.push(metadata)
}
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups[file] = metadata
} catch (error) {
warn(`listVmBackups ${file}`, { error })
warn(`can't read vm backup metadata`, { error, file, dir })
}
})
return backups
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
}
// use _ to mark this method as private by convention
// since we decorate it with synchronized.withKey in the constructor
// and # functions are not writable.
//
// read the list of backups of a VM from the cache
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(dir)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeVmBackupsCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
if (cached === undefined) {
return []
}
Object.values(cached).forEach(metadata => {
if (predicate === undefined || predicate(metadata)) {
backups.push(metadata)
}
})
return backups.sort(compareTimestamp)
}
@@ -531,46 +598,27 @@ class RemoteAdapter {
})
}
async _createSyntheticStream(handler, paths) {
let disposableVhds = []
// if it's a path : open all hierarchy of parent
if (typeof paths === 'string') {
let vhd
let vhdPath = paths
do {
const disposable = await openVhd(handler, vhdPath)
vhd = disposable.value
disposableVhds.push(disposable)
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
} else {
// only open the list of path given
disposableVhds = paths.map(path => openVhd(handler, path))
}
// open the hierarchy of ancestors until we find a full one
async _createSyntheticStream(handler, path) {
const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
// I don't want the vhds to be disposed on return
// but only when the stream is done ( or failed )
const disposables = await Disposable.all(disposableVhds)
const vhds = disposables.value
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposables.dispose()
await disposableSynthetic.dispose()
} catch (error) {
warn('_createSyntheticStream: failed to dispose VHDs', { error })
warn('openVhd: failed to dispose VHDs', { error })
}
}
}
const synthetic = new VhdSynthetic(vhds)
await synthetic.readHeaderAndFooter()
const synthetic = disposableSynthetic.value
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
@@ -603,7 +651,10 @@ class RemoteAdapter {
}
async readVmBackupMetadata(path) {
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
// _filename is a private field used to compute the backup id
//
// it's enumerable to make it cacheable
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
}
}

View File

@@ -45,7 +45,7 @@ const forkDeltaExport = deltaExport =>
})
class VmBackup {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
constructor({ baseSettings, config, getSnapshotNameLabel, job, remoteAdapters, schedule, settings, srs, vm }) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
// don't match replicated VMs created by this very job otherwise they
// will be replicated again and again
@@ -55,7 +55,6 @@ class VmBackup {
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
this.remotes = remotes
this.scheduleId = schedule.id
this.timestamp = undefined
@@ -173,7 +172,10 @@ class VmBackup {
const settings = this._settings
const doSnapshot =
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
settings.unconditionalSnapshot ||
this._isDelta ||
(!settings.offlineBackup && vm.power_state === 'Running') ||
settings.snapshotRetention !== 0
if (doSnapshot) {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
@@ -304,22 +306,17 @@ class VmBackup {
}
async _removeUnusedSnapshots() {
const jobSettings = this.job.settings
const allSettings = this.job.settings
const baseSettings = this._baseSettings
const baseVmRef = this._baseVm?.$ref
const { config } = this
const baseSettings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...jobSettings[''],
}
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
const settings = {
...baseSettings,
...jobSettings[scheduleId],
...jobSettings[this.vm.uuid],
...allSettings[scheduleId],
...allSettings[this.vm.uuid],
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {

View File

@@ -5,9 +5,9 @@
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
@@ -34,7 +34,8 @@ afterEach(async () => {
await handler.forget()
})
const uniqueId = () => crypto.randomBytes(16).toString('hex')
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
async function generateVhd(path, opts = {}) {
let vhd
@@ -53,10 +54,9 @@ async function generateVhd(path, opts = {}) {
}
vhd.header = { ...VHDHEADER, ...opts.header }
vhd.footer = { ...VHDFOOTER, ...opts.footer }
vhd.footer.uuid = Buffer.from(crypto.randomBytes(16))
vhd.footer = { ...VHDFOOTER, ...opts.footer, uuid: uniqueIdBuffer() }
if (vhd.header.parentUnicodeName) {
if (vhd.header.parentUuid) {
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
} else {
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
@@ -91,24 +91,31 @@ test('It remove broken vhd', async () => {
})
test('it remove vhd with missing or multiple ancestors', async () => {
// one with a broken parent
// one with a broken parent, should be deleted
await generateVhd(`${basePath}/abandonned.vhd`, {
header: {
parentUnicodeName: 'gone.vhd',
parentUid: Buffer.from(crypto.randomBytes(16)),
parentUuid: uniqueIdBuffer(),
},
})
// one orphan, which is a full vhd, no parent
// one orphan, which is a full vhd, no parent : should stay
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
// a child to the orphan
// a child to the orphan in the metadata : should stay
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
parentUuid: orphan.footer.uuid,
},
})
await handler.writeFile(
`metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [`${basePath}/child.vhd`, `${basePath}/abandonned.vhd`],
}),
{ flags: 'w' }
)
// clean
let loggued = ''
const onLog = message => {
@@ -147,7 +154,7 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
parentUuid: orphan.footer.uuid,
},
})
@@ -201,14 +208,14 @@ test('it merges delta of non destroyed chain', async () => {
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
parentUuid: orphan.footer.uuid,
},
})
// a grand child
await generateVhd(`${basePath}/grandchild.vhd`, {
header: {
parentUnicodeName: 'child.vhd',
parentUid: child.footer.uuid,
parentUuid: child.footer.uuid,
},
})
@@ -217,14 +224,12 @@ test('it merges delta of non destroyed chain', async () => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [unused, merging] = loggued
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
const [merging] = loggued
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
@@ -254,7 +259,7 @@ test('it finish unterminated merge ', async () => {
const child = await generateVhd(`${basePath}/child.vhd`, {
header: {
parentUnicodeName: 'orphan.vhd',
parentUid: orphan.footer.uuid,
parentUuid: orphan.footer.uuid,
},
})
// a merge in progress file
@@ -310,7 +315,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'gone.vhd',
parentUid: crypto.randomBytes(16),
parentUuid: uniqueIdBuffer(),
},
})
@@ -324,7 +329,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: ancestor.footer.uuid,
parentUuid: ancestor.footer.uuid,
},
})
// a grand child vhd in metadata
@@ -333,7 +338,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: child.footer.uuid,
parentUuid: child.footer.uuid,
},
})
@@ -348,7 +353,7 @@ describe('tests multiple combination ', () => {
mode: vhdMode,
header: {
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
parentUid: cleanAncestor.footer.uuid,
parentUuid: cleanAncestor.footer.uuid,
},
})

View File

@@ -31,66 +31,53 @@ const computeVhdsSize = (handler, vhdPaths) =>
}
)
// chain is an array of VHDs from child to parent
// chain is [ ancestor, child1, ..., childn]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, childn ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
let child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
chain
.slice(1)
.reverse()
.forEach(parent => {
onLog(`the parent ${parent} of the child ${child} is unused`)
})
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
if (merge) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
if (children.length !== 1) {
// TODO: implement merging multiple children
children.length = 1
child = children[0]
}
onLog(`merging ${child} into ${parent}`)
onLog(`merging ${children.length} children into ${parent}`)
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
onLog(`merging ${child}: ${done}/${total}`)
onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
}
}, 10e3)
const mergedSize = await mergeVhd(
handler,
parent,
handler,
child,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children),
{
onProgress({ done: d, total: t }) {
done = d
total = t
},
}
)
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, child),
asyncMap(children.slice(0, -1), child => {
onLog(`the VHD ${child} is unused`)
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
onLog(`the VHD ${child} is already merged`)
if (remove) {
onLog(`deleting unused VHD ${child}`)
onLog(`deleting merged VHD ${child}`)
return VhdAbstract.unlink(handler, child)
}
}),

View File

@@ -6,6 +6,11 @@
- [Task logs](#task-logs)
- [During backup](#during-backup)
- [During restoration](#during-restoration)
- [API](#api)
- [Run description object](#run-description-object)
- [`IdPattern`](#idpattern)
- [Settings](#settings)
- [Writer API](#writer-api)
## File structure on remote
@@ -66,7 +71,7 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
│ ├─ task.warning(message: string)
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string })
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
│ │ ├─ task.warning(message: string)
│ │ ├─ task.start(message: 'transfer')
│ │ │ ├─ task.warning(message: string)
@@ -95,3 +100,101 @@ task.start(message: 'restore', data: { jobId: string, srId: string, time: number
│ └─ task.end(result: { id: string, size: number })
└─ task.end
```
## API
### Run description object
This is a JavaScript object containing all the information necessary to run a backup job.
```coffee
# Information about the job itself
job:
# Unique identifier
id: string
# Human readable identifier
name: string
# Whether this job is doing Full Backup / Disaster Recovery or
# Delta Backup / Continuous Replication
mode: 'full' | 'delta'
# For backup jobs, indicates which remotes to use
remotes: IdPattern
settings:
# Used for the whole job
'': Settings
# Used for a specific schedule
[ScheduleId]: Settings
# Used for a specific VM
[VmId]: Settings
# For replication jobs, indicates which SRs to use
srs: IdPattern
# Here for historical reasons
type: 'backup'
# Indicates which VMs to backup/replicate
vms: IdPattern
# Indicates which XAPI to use to connect to a specific VM or SR
recordToXapi:
[ObjectId]: XapiId
# Information necessary to connect to each remote
remotes:
[RemoteId]:
url: string
# Indicates which schedule is used for this run
schedule:
id: ScheduleId
# Information necessary to connect to each XAPI
xapis:
[XapiId]:
allowUnauthorized: boolean
credentials:
password: string
username: string
url: string
```
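As a concrete illustration of this shape, an entirely hypothetical run description for a delta backup job (all identifiers and URLs are made up):

```js
const runDescription = {
  job: {
    id: 'job1',
    name: 'Nightly delta backup',
    mode: 'delta',
    remotes: { id: { __or: ['remote1', 'remote2'] } },
    settings: {
      '': { reportWhen: 'failure' },
      schedule1: { exportRetention: 7, snapshotRetention: 1 },
      vm1: { concurrency: 1 },
    },
    type: 'backup',
    vms: { id: 'vm1' },
  },
  recordToXapi: { vm1: 'xapi1' },
  remotes: { remote1: { url: 'nfs://…' }, remote2: { url: 's3://…' } },
  schedule: { id: 'schedule1' },
  xapis: {
    xapi1: {
      allowUnauthorized: true,
      credentials: { username: 'root', password: '…' },
      url: 'https://xcp-host.example',
    },
  },
}
```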
### `IdPattern`
For a single object:
```
{ id: string }
```
For multiple objects:
```
{ id: { __or: string[] } }
```
> This syntax is compatible with [`value-matcher`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/value-matcher).
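For instance, such a pattern can be turned into a predicate (a sketch, assuming `createPredicate` from `value-matcher`):

```js
const { createPredicate } = require('value-matcher')

const isSelected = createPredicate({ id: { __or: ['vm1', 'vm2'] } })

isSelected({ id: 'vm1' }) // → true
isSelected({ id: 'vm3' }) // → false
```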
### Settings
Settings are described in [`@xen-orchestra/backups/Backup.js`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
## Writer API
- `beforeBackup()`
- **Delta**
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `cleanup()`
- **Full**
- `run({ timestamp, sizeContainer, stream })`
- `afterBackup()`
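A hedged skeleton of a delta writer implementing this interface (the class name and method bodies are illustrative; the real writers live in the `writers/` directory of `@xen-orchestra/backups`):

```js
'use strict'

// Illustrative shape only: the real writers receive more context
// (adapter, backup, settings, …) via their constructor.
class ExampleDeltaWriter {
  async beforeBackup() {
    // acquire locks, check that the target is usable, etc.
  }

  async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    // remove from baseUuidToSrcVdi the VDIs that cannot be used as a base
  }

  async prepare({ isFull }) {
    // clean the target and prepare the transfer (full or incremental)
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
    // write the exported VHDs and metadata to the target
  }

  async cleanup() {
    // merge or remove data made obsolete by this run
  }

  async afterBackup() {
    // release locks, invalidate caches, etc.
  }
}

module.exports = ExampleDeltaWriter
```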

View File

@@ -1,4 +1,6 @@
#!/usr/bin/env node
// eslint-disable-next-line eslint-comments/disable-enable-pair
/* eslint-disable n/shebang */
'use strict'

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.22.0",
"version": "0.23.0",
"engines": {
"node": ">=14.6"
},
@@ -27,6 +27,7 @@
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
"golike-defer": "^0.5.1",
@@ -45,7 +46,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.11.0"
"@xen-orchestra/xapi": "^1.0.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -64,5 +64,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
}

View File

@@ -22,7 +22,7 @@ await ee.emitAsync('start')
// error handling though:
await ee.emitAsync(
{
onError(error) {
onError(error, event, listener) {
console.warn(error)
},
},

View File

@@ -40,7 +40,7 @@ await ee.emitAsync('start')
// error handling though:
await ee.emitAsync(
{
onError(error) {
onError(error, event, listener) {
console.warn(error)
},
},

View File

@@ -1,5 +1,7 @@
'use strict'
const identity = v => v
module.exports = function emitAsync(event) {
let opts
let i = 1
@@ -17,12 +19,18 @@ module.exports = function emitAsync(event) {
}
const onError = opts != null && opts.onError
const addErrorHandler = onError
? (promise, listener) => promise.catch(error => onError(error, event, listener))
: identity
return Promise.all(
this.listeners(event).map(listener =>
new Promise(resolve => {
resolve(listener.apply(this, args))
}).catch(onError)
addErrorHandler(
new Promise(resolve => {
resolve(listener.apply(this, args))
}),
listener
)
)
)
}
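Putting the new signature together, a small usage sketch (attaching `emitAsync` to an `EventEmitter` as in the package README shown above):

```js
'use strict'

const EventEmitter = require('events')
const emitAsync = require('@xen-orchestra/emit-async')

const ee = new EventEmitter()
ee.emitAsync = emitAsync

ee.on('start', async function onStart() {
  throw new Error('boom')
})

// the error handler now also receives the event name and the failing listener
ee.emitAsync(
  {
    onError(error, event, listener) {
      console.warn('listener failed', { event, listener: listener.name, error })
    },
  },
  'start'
).then(() => console.log('all listeners have settled'))
```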

View File

@@ -77,9 +77,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
})
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this._s3.middlewareStack.use(
getApplyMd5BodyChecksumPlugin(this._s3.config)
)
this._s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this._s3.config))
const parts = split(path)
this._bucket = parts.shift()
@@ -99,7 +97,12 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
_makePrefix(dir) {
return join(this._dir, dir, '/')
const prefix = join(this._dir, dir, '/')
// no prefix for root
if (prefix !== './') {
return prefix
}
}
_createParams(file) {
@@ -232,14 +235,17 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _createReadStream(path, options) {
if (!(await this._isFile(path))) {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
try {
return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
} catch (e) {
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
}
throw e
}
return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
}
async _unlink(path) {

View File

@@ -1,15 +1,13 @@
'use strict'
const get = require('lodash/get')
const identity = require('lodash/identity')
const isEqual = require('lodash/isEqual')
const { createLogger } = require('@xen-orchestra/log')
const { parseDuration } = require('@vates/parse-duration')
const { watch } = require('app-conf')
import get from 'lodash/get.js'
import identity from 'lodash/identity.js'
import isEqual from 'lodash/isEqual.js'
import { createLogger } from '@xen-orchestra/log'
import { parseDuration } from '@vates/parse-duration'
import { watch } from 'app-conf'
const { warn } = createLogger('xo:mixins:config')
module.exports = class Config {
export default class Config {
constructor(app, { appDir, appName, config }) {
this._config = config
const watchers = (this._watchers = new Set())
@@ -56,8 +54,9 @@ module.exports = class Config {
try {
const value = processor(get(config, path))
if (!isEqual(value, prev)) {
const previous = prev
prev = value
cb(value)
cb(value, previous, path)
}
} catch (error) {
warn('watch', { error, path })

View File

@@ -1,9 +1,7 @@
'use strict'
const assert = require('assert')
const emitAsync = require('@xen-orchestra/emit-async')
const EventEmitter = require('events')
const { createLogger } = require('@xen-orchestra/log')
import assert from 'assert'
import emitAsync from '@xen-orchestra/emit-async'
import EventEmitter from 'events'
import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:mixins:hooks')
@@ -19,7 +17,7 @@ const runHook = async (emitter, hook) => {
debug(`${hook} finished`)
}
module.exports = class Hooks extends EventEmitter {
export default class Hooks extends EventEmitter {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.

View File

@@ -1,15 +1,15 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { EventListenersManager } from '@vates/event-listeners-manager'
import { pipeline } from 'stream'
import { ServerResponse, request } from 'http'
import assert from 'assert'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import net from 'net'
const { debug, warn } = require('@xen-orchestra/log').createLogger('xo:mixins:HttpProxy')
const { EventListenersManager } = require('@vates/event-listeners-manager')
const { pipeline } = require('stream')
const { ServerResponse, request } = require('http')
const assert = require('assert')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const net = require('net')
import { parseBasicAuth } from './_parseBasicAuth.mjs'
const { parseBasicAuth } = require('./_parseBasicAuth.js')
const { debug, warn } = createLogger('xo:mixins:HttpProxy')
const IGNORED_HEADERS = new Set([
// https://datatracker.ietf.org/doc/html/rfc2616#section-13.5.1
@@ -26,7 +26,7 @@ const IGNORED_HEADERS = new Set([
'host',
])
module.exports = class HttpProxy {
export default class HttpProxy {
#app
constructor(app, { httpServer }) {

View File

@@ -1,8 +1,6 @@
'use strict'
const RE = /^\s*basic\s+(.+?)\s*$/i
exports.parseBasicAuth = function parseBasicAuth(header) {
export function parseBasicAuth(header) {
if (header === undefined) {
return
}

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.3.1",
"version": "0.4.0",
"engines": {
"node": ">=12"
},

View File

@@ -1,25 +1,23 @@
#!/usr/bin/env node
'use strict'
import assert from 'assert'
import colors from 'ansi-colors'
import contentType from 'content-type'
import CSON from 'cson-parser'
import fromCallback from 'promise-toolbox/fromCallback'
import fs from 'fs'
import getopts from 'getopts'
import hrp from 'http-request-plus'
import split2 from 'split2'
import pumpify from 'pumpify'
import { extname } from 'path'
import { format, parse } from 'json-rpc-protocol'
import { inspect } from 'util'
import { load as loadConfig } from 'app-conf'
import { pipeline } from 'stream'
import { readChunk } from '@vates/read-chunk'
const assert = require('assert')
const colors = require('ansi-colors')
const contentType = require('content-type')
const CSON = require('cson-parser')
const fromCallback = require('promise-toolbox/fromCallback')
const fs = require('fs')
const getopts = require('getopts')
const hrp = require('http-request-plus')
const split2 = require('split2')
const pumpify = require('pumpify')
const { extname, join } = require('path')
const { format, parse } = require('json-rpc-protocol')
const { inspect } = require('util')
const { load: loadConfig } = require('app-conf')
const { pipeline } = require('stream')
const { readChunk } = require('@vates/read-chunk')
const pkg = require('./package.json')
const pkg = JSON.parse(fs.readFileSync(new URL('package.json', import.meta.url)))
const FORMATS = {
__proto__: null,
@@ -32,7 +30,6 @@ const parseValue = value => (value.startsWith('json:') ? JSON.parse(value.slice(
async function main(argv) {
const config = await loadConfig('xo-proxy', {
appDir: join(__dirname, '..'),
ignoreUnknownFormats: true,
})

View File

@@ -19,10 +19,10 @@
},
"preferGlobal": true,
"bin": {
"xo-proxy-cli": "./index.js"
"xo-proxy-cli": "./index.mjs"
},
"engines": {
"node": ">=14"
"node": ">=14.13"
},
"dependencies": {
"@iarna/toml": "^2.2.0",

View File

@@ -1,6 +1,6 @@
import Config from '@xen-orchestra/mixins/Config.js'
import Hooks from '@xen-orchestra/mixins/Hooks.js'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.js'
import Config from '@xen-orchestra/mixins/Config.mjs'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'

View File

@@ -22,27 +22,6 @@ disableMergeWorker = false
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
vhdDirectoryCompression = 'brotli'
[backups.defaultSettings]
reportWhen = 'failure'
[backups.metadata.defaultSettings]
retentionPoolMetadata = 0
retentionXoMetadata = 0
[backups.vm.defaultSettings]
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
copyRetention = 0
deleteFirst = false
exportRetention = 0
fullInterval = 0
offlineBackup = false
offlineSnapshot = false
snapshotRetention = 0
timeout = 0
vmTimeout = 0
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.22.0",
"version": "0.22.1",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -32,13 +32,13 @@
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.22.0",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.3.1",
"@xen-orchestra/mixins": "^0.4.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.11.0",
"@xen-orchestra/xapi": "^1.0.0",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.1.0",

View File

@@ -0,0 +1,5 @@
{
"extends": [
"plugin:cypress/recommended"
]
}

@xen-orchestra/test-e2e/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
cypress/downloads
cypress/screenshots
cypress/videos
cypress.json

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,28 @@
{
"baseUrl": "http://ci-test-xen-orchestra.localdomain/",
"env": {
"xoLab": {
"url": "",
"username": "",
"password": ""
},
"xenOrchestra": {
"host": "ci-test-xen-orchestra.localdomain",
"username": "",
"password": "",
"snapshotId": ""
},
"xcpNgLts": {
"host": "ci-test-xcp-ng-lts.localdomain",
"username": "",
"password": "",
"snapshotId": ""
},
"xenServerLts": {
"host": "ci-test-xenserver-lts.localdomain",
"username": "",
"password": "",
"snapshotId": ""
}
}
}

View File

@@ -0,0 +1,26 @@
'use strict'
describe('Sign In', () => {
it('should not be able to sign in with bad credentials', () => {
cy.visit('/')
cy.get('[name="username"]').type('bad-user')
cy.get('[name="password"]').type('bad-password')
cy.get('.btn-info').click()
cy.get('.text-danger')
cy.url().should('not.include', '/#/home')
})
it('should be able to sign in', () => {
cy.visit('/')
cy.get('[name="username"]').type(Cypress.env('xenOrchestra').username)
cy.get('[name="password"]').type(Cypress.env('xenOrchestra').password)
cy.get('.btn-info').click()
cy.url().should('include', '/#/home')
})
it('should sign in without UI', () => {
cy.login()
cy.visit('/')
cy.url().should('include', '/#/home')
})
})

View File

@@ -0,0 +1,20 @@
'use strict'
describe('Remote', function () {
beforeEach('login', () => {
cy.login()
})
it('should add a remote', function () {
cy.addServers()
cy.visit('/#/settings/remotes')
cy.contains('New file system remote').should('exist')
cy.get('select[name="type"]').select('Local')
cy.get('input[name="name"]').type('Test local file remote 2')
cy.get('input[name="path"]').type('var/tmp/test-remote')
cy.contains('Save configuration').click()
cy.contains('Local remote selected')
cy.get('button:contains("OK")').click()
cy.get('td:contains("Test local file remote 2")').closest('tr').find('button:contains("Enabled")')
})
})

View File

@@ -0,0 +1,38 @@
'use strict'
describe('Server', () => {
beforeEach('login', () => {
cy.login()
})
it('should add a server', () => {
cy.visit('/#/settings/servers')
cy.get('#form-add-server .form-group:nth-child(1) input').type('XCP-ng LTS')
cy.get('#form-add-server .form-group:nth-child(2) input').type(Cypress.env('xcpNgLts').host)
cy.get('#form-add-server .form-group:nth-child(3) input').type(Cypress.env('xcpNgLts').username)
cy.get('#form-add-server .form-group:nth-child(4) input').type(Cypress.env('xcpNgLts').password)
cy.get('#form-add-server .form-group:nth-child(5) .xo-icon-toggle-off').click()
cy.get('span:contains("Connect")').click()
cy.get(`td:contains("XCP-ng LTS")`)
})
it('should remove a server', () => {
cy.get(`td:contains("XCP-ng LTS")`).closest('tr').find('.btn-danger').click()
cy.get(`td:contains("XCP-ng LTS")`).should('not.exist')
})
it('should disable a server', () => {
cy.addServers();
cy.visit('/#/settings/servers')
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Enabled")').click()
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Disabled")')
})
it('should enable a server', () => {
cy.visit('/#/settings/servers')
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Disabled")').click()
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Enabled")')
})
})

View File

@@ -0,0 +1,17 @@
'use strict'
describe('VM', function () {
beforeEach('login', () => {
cy.login()
})
it('should add a VM', function () {
cy.addServers()
cy.visit('/#/home?t=VM')
cy.get('a:contains("New VM")').click()
cy.contains('Create a new VM on').should('exist')
cy.contains('Select pool').closest('.Select-control').click()
cy.contains('ci-test-xcp-ng-lts').should('exist').click()
cy.get('h4:contains("Performance")').should('exist')
})
})

View File

@@ -0,0 +1,6 @@
/// <reference types="cypress" />
'use strict'
module.exports = (on, config) => {
// configure plugins here
}

View File

@@ -0,0 +1,39 @@
'use strict'
// https://on.cypress.io/custom-commands
const { default: Xo } = require('xo-lib')
const { username: xoUsername, password: xoPassword } = Cypress.env('xenOrchestra')
Cypress.Commands.add('login', (username = xoUsername, password = xoPassword) => {
cy.request({
method: 'POST',
url: '/signin/local',
form: true,
body: {
username,
password,
},
})
cy.setCookie('previousDisclaimer', Date.now().toString())
})
Cypress.Commands.add('addServers', async () => {
const xo = new Xo({ url: Cypress.config('baseUrl') })
await xo.open()
await xo.signIn({
email: xoUsername,
password: xoPassword,
})
const { host, username, password } = Cypress.env('xcpNgLts')
await xo.call('server.add', {
host,
username,
password,
label: 'XCP-ng LTS',
allowUnauthorized: true,
})
})

View File

@@ -0,0 +1,9 @@
/// <reference types="cypress" />
declare namespace Cypress {
interface Chainable<Subject> {
login(username: string, password: string): Chainable<any>
logout(): Chainable<any>
addServers(): Chainable<any>
}
}

View File

@@ -0,0 +1,10 @@
'use strict'
import './commands'
before('Restore VMs from snapshot', () => {
cy.exec(
`node scripts/restore-vm.js && wait-on ${Cypress.config('baseUrl')} && wait-on tcp:${Cypress.env('xcpNgLts').host}:80`,
{ timeout: 300e3 },
)
})

View File

@@ -0,0 +1,32 @@
{
"private": true,
"name": "@xen-orchestra/test-e2e",
"version": "0.0.0",
"license": "AGPL-3.0-or-later",
"description": "E2E Tests for Xen Orchestra",
"repository": {
"directory": "@xen-orchestra/test-e2e",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=14"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"bin": {
"restore-vm": "./scripts/restore-vm.js"
},
"devDependencies": {
"cypress": "^9.7.0",
"eslint-plugin-chai-friendly": "^0.7.2",
"eslint-plugin-cypress": "^2.12.1",
"wait-on": "^6.0.1",
"xo-lib": "^0.11.1"
},
"scripts": {
"test": "cypress run"
}
}

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env node
'use strict'
/* eslint-disable no-console */
const Xo = require('xo-lib').default
const path = require('path')
const cypressJson = require(path.resolve(__dirname, '..', 'cypress.json'))
async function main() {
const xo = new Xo({ url: cypressJson.env.xoLab.url })
await xo.open()
await xo.signIn({
email: cypressJson.env.xoLab.username,
password: cypressJson.env.xoLab.password,
})
console.log('Reverting Test VMs from snapshots')
try {
await Promise.all([
xo.call('vm.revert', { snapshot: cypressJson.env.xenServerLts.snapshotId }),
xo.call('vm.revert', { snapshot: cypressJson.env.xcpNgLts.snapshotId }),
xo.call('vm.revert', { snapshot: cypressJson.env.xenOrchestra.snapshotId }),
])
} catch (error) {
console.error('Error happened while reverting VMs')
throw error
}
xo.close()
console.log('VMs reverted successfully.')
}
main()
/* eslint-enable no-console */

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "0.11.0",
"version": "1.0.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {

View File

@@ -1,5 +1,19 @@
# ChangeLog
## 5.70.2 (2022-05-16)
### Bug fixes
- [Pool/Patches] Fix failure to install patches on Citrix Hypervisor (PR [#6231](https://github.com/vatesfr/xen-orchestra/pull/6231))
### Released packages
- @xen-orchestra/xapi 1.0.0
- @xen-orchestra/backups 0.23.0
- @xen-orchestra/mixins 0.4.0
- @xen-orchestra/proxy 0.22.1
- xo-server 5.93.1
## 5.70.1 (2022-05-04)
### Enhancement

View File

@@ -7,10 +7,18 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup] Merge multiple VHDs at once, which speeds up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [Backup] Add setting `backups.vm.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup; this is useful to avoid keeping a halted VM locked during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] Remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
### Packages to release
> Packages will be released in the order they are here, therefore, they should
@@ -33,5 +41,17 @@
<!--packages-start-->
- vhd-lib patch
- @xen-orchestra/fs patch
- vhd-cli patch
- xo-vmdk-to-vhd minor
- @xen-orchestra/upload-ova patch
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli patch
- @xen-orchestra/emit-async major
- @xen-orchestra/mixins minor
- @xen-orchestra/proxy minor
- xo-server minor
- xo-web minor
<!--packages-end-->

SECURITY.md Normal file
View File

@@ -0,0 +1,17 @@
# Security Policy
## Supported Versions
We apply patches and fix security issues for the following versions:
| Version | Supported |
| ------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| anything else | :x: |
## Reporting a Vulnerability
If you discover a vulnerability, you should contact us by sending an email to security at vates dot fr
From there, we'll discuss how to deal with it and prepare a dedicated mitigation.

View File

@@ -138,9 +138,22 @@ This CLI is mainly used as a debug tool, there's no 100% guarantee on its stabil
> xo-cli --help
Usage:
xo-cli --register <XO-Server URL> <username> [<password>]
xo-cli --register [--allowUnauthorized] [--expiresIn duration] <XO-Server URL> <username> [<password>]
Registers the XO instance to use.
--allowUnauthorized, --au
Accept invalid certificate (e.g. self-signed).
--expiresIn duration
Can be used to change the validity duration of the
authorization token (default: one month).
xo-cli --createToken <params>…
Create an authentication token for XO API.
<params>…
Accept the same parameters as --register, see its usage.
xo-cli --unregister
Remove stored credentials.
@@ -160,7 +173,6 @@ Usage:
xo-cli <command> [<name>=<value>]...
Executes a command on the current XO instance.
```
#### Register your XO instance

View File

@@ -273,6 +273,52 @@ Don't forget to start redis if you don't reboot now:
service redis start
```
### OpenBSD
If you are using OpenBSD, you need to install these packages:
```
pkg_add gmake redis python--%2.7 git node autoconf yarn
```
A few of the npm packages look for system binaries as part of their installation and, if they are missing, will try to build them themselves. Installing these packages will save some time and allow for easier upgrades later:
```
pkg_add jpeg optipng gifsicle
```
Because OpenBSD ships with Clang and not GCC, you need to do this:
```
export CC=/usr/bin/clang
export CXX=/usr/bin/clang++
```
You will need to update the number of allowed open files and make `node` available to `npm`:
```
ulimit -n 10240
ln -s /usr/local/bin/node /tmp/node
```
If `yarn` cannot find Python, give it a hand:
```
PYTHON=/usr/local/bin/python2 yarn
```
Enable redis on boot with:
```
rcctl enable redis
```
Don't forget to start redis if you don't reboot now:
```
rcctl start redis
```
### sudo
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server` configuration file and setting `useSudo = true`. It's near the end of the file:

View File

@@ -22,7 +22,7 @@ Cookie: authenticationToken=TN2YBOMYtXB_hHtf4wTzm9p5tTuqq2i15yeuhcz2xXM
The server will respond to an invalid token with a `401 Unauthorized` status.
The server can request that the client updates its token with a `Set-Cookie` header:
**[Not implemented at this time]** The server can request that the client updates its token with a `Set-Cookie` header:
```http
HTTP/1.1 200 OK

View File

@@ -277,7 +277,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
if (compressionType === undefined) {
await this._handler.unlink(path)
} else {
await this._handler.writeFile(path, JSON.stringify([compressionType]))
await this._handler.writeFile(path, JSON.stringify([compressionType]), { flags: 'w' })
}
}

View File

@@ -201,9 +201,7 @@ exports.VhdFile = class VhdFile extends VhdAbstract {
readBlock(blockId, onlyBitmap = false) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
throw new Error(`no such block ${blockId}`)
}
assert(blockAddr !== BLOCK_UNUSED, `no such block ${blockId}`)
return this._read(sectorsToBytes(blockAddr), onlyBitmap ? this.bitmapSize : this.fullBlockSize).then(buf =>
onlyBitmap

View File

@@ -9,8 +9,7 @@ const { getSyncedHandler } = require('@xen-orchestra/fs')
const { SECTOR_SIZE, PLATFORMS } = require('../_constants')
const { createRandomFile, convertFromRawToVhd } = require('../tests/utils')
const { openVhd, chainVhd } = require('..')
const { VhdSynthetic } = require('./VhdSynthetic')
const { openVhd, chainVhd, VhdSynthetic } = require('..')
let tempDir = null
@@ -40,10 +39,8 @@ test('It can read block and parent locator from a synthetic vhd', async () => {
// ensure the two VHD are linked, with the child of type DISK_TYPES.DIFFERENCING
await chainVhd(handler, bigVhdFileName, handler, smallVhdFileName, true)
const [smallVhd, bigVhd] = yield Disposable.all([
openVhd(handler, smallVhdFileName),
openVhd(handler, bigVhdFileName),
])
const bigVhd = yield openVhd(handler, bigVhdFileName)
await bigVhd.readBlockAllocationTable()
// add parent locator
// this will also scramble the block inside the vhd files
await bigVhd.writeParentLocator({
@@ -51,7 +48,14 @@ test('It can read block and parent locator from a synthetic vhd', async () => {
platformCode: PLATFORMS.W2KU,
data: Buffer.from('I am in the big one'),
})
const syntheticVhd = new VhdSynthetic([smallVhd, bigVhd])
// header changed since there is a new parent locator
await bigVhd.writeHeader()
// the footer at the end changed since the blocks have been moved
await bigVhd.writeFooter()
await bigVhd.readHeaderAndFooter()
const syntheticVhd = yield VhdSynthetic.open(handler, [smallVhdFileName, bigVhdFileName])
await syntheticVhd.readBlockAllocationTable()
expect(syntheticVhd.header.diskType).toEqual(bigVhd.header.diskType)

View File

@@ -2,13 +2,16 @@
const UUID = require('uuid')
const cloneDeep = require('lodash/cloneDeep.js')
const Disposable = require('promise-toolbox/Disposable')
const { asyncMap } = require('@xen-orchestra/async-map')
const { VhdAbstract } = require('./VhdAbstract')
const { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } = require('../_constants')
const assert = require('assert')
const { DISK_TYPES, FOOTER_SIZE, HEADER_SIZE } = require('../_constants')
const { openVhd } = require('../openVhd')
const resolveRelativeFromFile = require('../_resolveRelativeFromFile')
const { VhdAbstract } = require('./VhdAbstract')
exports.VhdSynthetic = class VhdSynthetic extends VhdAbstract {
const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
#vhds = []
get header() {
@@ -40,13 +43,6 @@ exports.VhdSynthetic = class VhdSynthetic extends VhdAbstract {
}
}
static async open(vhds) {
const vhd = new VhdSynthetic(vhds)
return {
dispose: () => {},
value: vhd,
}
}
/**
* @param {Array<VhdAbstract>} vhds the chain of Vhds used to compute this Vhd, from the deepest child (in position 0), to the root (in the last position)
* only the last one can have any type. Other must have type DISK_TYPES.DIFFERENCING (delta)
@@ -80,6 +76,8 @@ exports.VhdSynthetic = class VhdSynthetic extends VhdAbstract {
async readBlock(blockId, onlyBitmap = false) {
const index = this.#vhds.findIndex(vhd => vhd.containsBlock(blockId))
assert(index !== -1, `no such block ${blockId}`)
// only read the content of the first vhd containing this block
return await this.#vhds[index].readBlock(blockId, onlyBitmap)
}
@@ -88,3 +86,27 @@ exports.VhdSynthetic = class VhdSynthetic extends VhdAbstract {
return this.#vhds[this.#vhds.length - 1]._readParentLocatorData(id)
}
}
// add decorated static method
VhdSynthetic.fromVhdChain = Disposable.factory(async function* fromVhdChain(handler, childPath) {
let vhdPath = childPath
let vhd
const vhds = []
do {
vhd = yield openVhd(handler, vhdPath)
vhds.push(vhd)
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== DISK_TYPES.DYNAMIC)
const synthetic = new VhdSynthetic(vhds)
await synthetic.readHeaderAndFooter()
yield synthetic
})
VhdSynthetic.open = Disposable.factory(async function* open(handler, paths, opts) {
const synthetic = new VhdSynthetic(yield Disposable.all(paths.map(path => openVhd(handler, path, opts))))
await synthetic.readHeaderAndFooter()
yield synthetic
})
exports.VhdSynthetic = VhdSynthetic
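
For reference, a minimal usage sketch of the new Disposable-based factory. The handler URL, file paths and deep require path are illustrative assumptions, and it assumes `Disposable.use` from promise-toolbox accepts an async generator as in the tests above; the chain is passed from the deepest child first to the root last, as documented on the constructor.

// sketch: open a child/parent chain as one synthetic VHD and read its BAT
const Disposable = require('promise-toolbox/Disposable')
const { getHandler } = require('@xen-orchestra/fs')
const { VhdSynthetic } = require('vhd-lib/Vhd/VhdSynthetic') // assumed import path

Disposable.use(async function* () {
  const handler = getHandler({ url: 'file://' })
  // every underlying VHD is opened and disposed together with the synthetic one
  const vhd = yield VhdSynthetic.open(handler, ['/tmp/child.vhd', '/tmp/parent.vhd'])
  await vhd.readBlockAllocationTable()
  return vhd.header.diskType
}).then(console.log, console.error)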

View File

@@ -8,7 +8,7 @@ const tmp = require('tmp')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { VhdFile, chainVhd, mergeVhd: vhdMerge } = require('./index')
const { VhdFile, chainVhd, mergeVhd } = require('./index')
const { checkFile, createRandomFile, convertFromRawToVhd } = require('./tests/utils')
@@ -27,24 +27,23 @@ afterEach(async () => {
test('merge works in normal cases', async () => {
const mbOfFather = 8
const mbOfChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const child1FileName = `${tempDir}/child1.vhd`
const handler = getHandler({ url: 'file://' })
const parentRandomFileName = `randomfile`
const childRandomFileName = `small_randomfile`
const parentFileName = `parent.vhd`
const child1FileName = `child1.vhd`
const handler = getHandler({ url: `file://${tempDir}` })
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, child1FileName)
await createRandomFile(`${tempDir}/${parentRandomFileName}`, mbOfFather)
await convertFromRawToVhd(`${tempDir}/${parentRandomFileName}`, `${tempDir}/${parentFileName}`)
await createRandomFile(`${tempDir}/${childRandomFileName}`, mbOfChildren)
await convertFromRawToVhd(`${tempDir}/${childRandomFileName}`, `${tempDir}/${child1FileName}`)
await chainVhd(handler, parentFileName, handler, child1FileName, true)
// merge
await vhdMerge(handler, parentFileName, handler, child1FileName)
await mergeVhd(handler, parentFileName, handler, child1FileName)
// check that vhd is still valid
await checkFile(parentFileName)
await checkFile(`${tempDir}/${parentFileName}`)
const parentVhd = new VhdFile(handler, parentFileName)
await parentVhd.readHeaderAndFooter()
@@ -56,7 +55,7 @@ test('merge works in normal cases', async () => {
const blockContent = block.data
const file = offset < mbOfChildren * 1024 * 1024 ? childRandomFileName : parentRandomFileName
const buffer = Buffer.alloc(blockContent.length)
const fd = await fs.open(file, 'r')
const fd = await fs.open(`${tempDir}/${file}`, 'r')
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
@@ -94,7 +93,7 @@ test('it can resume a merge ', async () => {
})
)
// expect merge to fail since child header is not ok
await expect(async () => await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
await expect(async () => await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
await handler.unlink('.parent.vhd.merge.json')
await handler.writeFile(
@@ -109,7 +108,7 @@ test('it can resume a merge ', async () => {
})
)
// expect merge to fail since parent header is not ok
await expect(async () => await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')).rejects.toThrow()
await expect(async () => await mergeVhd(handler, 'parent.vhd', handler, ['child1.vhd'])).rejects.toThrow()
// break the end footer of parent
const size = await handler.getSize('parent.vhd')
@@ -136,7 +135,7 @@ test('it can resume a merge ', async () => {
)
// really merge
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
await mergeVhd(handler, 'parent.vhd', handler, 'child1.vhd')
// reload header, footer and block allocation table, they should succeed
await parentVhd.readHeaderAndFooter()
@@ -157,3 +156,53 @@ test('it can resume a merge ', async () => {
offset += parentVhd.header.blockSize
}
})
test('it merges multiple children in one pass', async () => {
const mbOfFather = 8
const mbOfChildren = 6
const mbOfGrandChildren = 4
const parentRandomFileName = `${tempDir}/randomfile`
const childRandomFileName = `${tempDir}/small_randomfile`
const grandChildRandomFileName = `${tempDir}/another_small_randomfile`
const parentFileName = `${tempDir}/parent.vhd`
const childFileName = `${tempDir}/child.vhd`
const grandChildFileName = `${tempDir}/grandchild.vhd`
const handler = getHandler({ url: 'file://' })
await createRandomFile(parentRandomFileName, mbOfFather)
await convertFromRawToVhd(parentRandomFileName, parentFileName)
await createRandomFile(childRandomFileName, mbOfChildren)
await convertFromRawToVhd(childRandomFileName, childFileName)
await chainVhd(handler, parentFileName, handler, childFileName, true)
await createRandomFile(grandChildRandomFileName, mbOfGrandChildren)
await convertFromRawToVhd(grandChildRandomFileName, grandChildFileName)
await chainVhd(handler, childFileName, handler, grandChildFileName, true)
// merge
await mergeVhd(handler, parentFileName, handler, [grandChildFileName, childFileName])
// check that vhd is still valid
await checkFile(parentFileName)
const parentVhd = new VhdFile(handler, parentFileName)
await parentVhd.readHeaderAndFooter()
await parentVhd.readBlockAllocationTable()
let offset = 0
// check that the data are the same as the source
for await (const block of parentVhd.blocks()) {
const blockContent = block.data
let file = parentRandomFileName
if (offset < mbOfGrandChildren * 1024 * 1024) {
file = grandChildRandomFileName
} else if (offset < mbOfChildren * 1024 * 1024) {
file = childRandomFileName
}
const buffer = Buffer.alloc(blockContent.length)
const fd = await fs.open(file, 'r')
await fs.read(fd, buffer, 0, buffer.length, offset)
expect(buffer.equals(blockContent)).toEqual(true)
offset += parentVhd.header.blockSize
}
})

View File

@@ -13,6 +13,7 @@ const { DISK_TYPES } = require('./_constants')
const { Disposable } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const { VhdDirectory } = require('./Vhd/VhdDirectory')
const { VhdSynthetic } = require('./Vhd/VhdSynthetic')
const { warn } = createLogger('vhd-lib:merge')
@@ -27,7 +28,8 @@ function makeThrottledWriter(handler, path, delay) {
}
}
// Merge vhd child into vhd parent.
// Merge one or multiple VHD children into the VHD parent.
// childPath can be an array to create a synthetic VHD from multiple VHDs
//
// TODO: rename the VHD file during the merge
module.exports = limitConcurrency(2)(async function merge(
@@ -56,16 +58,24 @@ module.exports = limitConcurrency(2)(async function merge(
flags: 'r+',
checkSecondFooter: mergeState === undefined,
})
const childVhd = yield openVhd(childHandler, childPath)
let childVhd
if (Array.isArray(childPath)) {
childVhd = yield VhdSynthetic.open(childHandler, childPath)
} else {
childVhd = yield openVhd(childHandler, childPath)
}
const concurrency = childVhd instanceof VhdDirectory ? 16 : 1
if (mergeState === undefined) {
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
if (mergeState === undefined) {
// merge should be along a vhd chain
assert.strictEqual(childVhd.header.parentUuid.equals(parentVhd.footer.uuid), true)
const parentDiskType = parentVhd.footer.diskType
assert(parentDiskType === DISK_TYPES.DIFFERENCING || parentDiskType === DISK_TYPES.DYNAMIC)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPES.DIFFERENCING)
assert.strictEqual(childVhd.header.blockSize, parentVhd.header.blockSize)
} else {
// the VHDs must not have changed in order to resume the merge
assert.strictEqual(parentVhd.header.checksum, mergeState.parent.header)
assert.strictEqual(childVhd.header.checksum, mergeState.child.header)
}
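
A minimal sketch of the resulting call shape (handler URL and paths are illustrative): when childPath is an array, the children are listed from the deepest descendant up to the direct child of the parent, as in the multi-child test above.

// sketch: merge a grandchild and its parent (the child) into the root parent in one pass
const { getHandler } = require('@xen-orchestra/fs')
const { mergeVhd } = require('vhd-lib') // assumed package entry point

async function flattenChain() {
  const handler = getHandler({ url: 'file:///tmp/vhds' })
  await mergeVhd(handler, 'parent.vhd', handler, ['grandchild.vhd', 'child.vhd'])
}
flattenChain().catch(console.error)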

View File

@@ -30,7 +30,7 @@
},
"devDependencies": {
"@xen-orchestra/fs": "^1.0.1",
"execa": "^6.1.0",
"execa": "^5.0.0",
"get-stream": "^6.0.0",
"rimraf": "^3.0.2",
"tmp": "^0.2.1"

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,9 +1,8 @@
const { readFile, writeFile } = require('fs/promises')
const get = require('lodash/get')
const mkdirp = require('fs-extra').ensureDir
const unset = require('lodash/unset')
const xdgBasedir = require('xdg-basedir')
import { ensureDir as mkdirp } from 'fs-extra'
import { readFile, writeFile } from 'fs/promises'
import lodashGet from 'lodash/get.js'
import lodashUnset from 'lodash/unset.js'
import xdgBasedir from 'xdg-basedir'
// ===================================================================
@@ -12,36 +11,36 @@ const configFile = configPath + '/config.json'
// ===================================================================
const load = (exports.load = function () {
export function load() {
return readFile(configFile)
.then(JSON.parse)
.catch(function () {
return {}
})
})
}
exports.get = function (path) {
export function get(path) {
return load().then(function (config) {
return get(config, path)
return lodashGet(config, path)
})
}
const save = (exports.save = function (config) {
export function save(config) {
return mkdirp(configPath).then(function () {
return writeFile(configFile, JSON.stringify(config))
})
})
}
exports.set = function (data) {
export function set(data) {
return load().then(function (config) {
return save(Object.assign(config, data))
})
}
exports.unset = function (paths) {
export function unset(paths) {
return load().then(function (config) {
;[].concat(paths).forEach(function (path) {
unset(config, path)
lodashUnset(config, path)
})
return save(config)
})

View File

@@ -1,30 +1,31 @@
#!/usr/bin/env node
const createReadStream = require('fs').createReadStream
const createWriteStream = require('fs').createWriteStream
const stat = require('fs-extra').stat
const chalk = require('chalk')
const forEach = require('lodash/forEach')
const fromCallback = require('promise-toolbox/fromCallback')
const getKeys = require('lodash/keys')
const getopts = require('getopts')
const hrp = require('http-request-plus')
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isObject = require('lodash/isObject')
const micromatch = require('micromatch')
const pairs = require('lodash/toPairs')
const pick = require('lodash/pick')
const prettyMs = require('pretty-ms')
const progressStream = require('progress-stream')
const pw = require('pw')
const Xo = require('xo-lib').default
const { PassThrough, pipeline } = require('stream')
import { createReadStream, createWriteStream, readFileSync } from 'fs'
import { PassThrough, pipeline } from 'stream'
import { stat } from 'fs/promises'
import chalk from 'chalk'
import execPromise from 'exec-promise'
import forEach from 'lodash/forEach.js'
import fromCallback from 'promise-toolbox/fromCallback'
import getKeys from 'lodash/keys.js'
import getopts from 'getopts'
import hrp from 'http-request-plus'
import humanFormat from 'human-format'
import identity from 'lodash/identity.js'
import isObject from 'lodash/isObject.js'
import micromatch from 'micromatch'
import pairs from 'lodash/toPairs.js'
import pick from 'lodash/pick.js'
import prettyMs from 'pretty-ms'
import progressStream from 'progress-stream'
import pw from 'pw'
import XoLib from 'xo-lib'
// -------------------------------------------------------------------
const config = require('./config')
import * as config from './config.mjs'
const Xo = XoLib.default
// ===================================================================
@@ -229,11 +230,13 @@ $name v$version
return pkg[key]
})
})(require('../package'))
})(JSON.parse(readFileSync(new URL('package.json', import.meta.url))))
)
// -------------------------------------------------------------------
const COMMANDS = { __proto__: null }
function main(args) {
if (!args || !args.length || args[0] === '-h') {
return help()
@@ -246,11 +249,11 @@ function main(args) {
return match[1].toUpperCase()
})
if (fnName in exports) {
return exports[fnName](args.slice(1))
if (fnName in COMMANDS) {
return COMMANDS[fnName](args.slice(1))
}
return exports.call(args).catch(error => {
return COMMANDS.call(args).catch(error => {
if (!(error != null && error.code === 10 && 'errors' in error.data)) {
throw error
}
@@ -263,11 +266,10 @@ function main(args) {
throw lines.join('\n')
})
}
exports = module.exports = main
// -------------------------------------------------------------------
exports.help = help
COMMANDS.help = help
async function createToken(args) {
const token = await _createToken(await parseRegisterArgs(args))
@@ -275,7 +277,7 @@ async function createToken(args) {
console.warn()
console.log(token)
}
exports.createToken = createToken
COMMANDS.createToken = createToken
async function register(args) {
const opts = await parseRegisterArgs(args)
@@ -286,12 +288,12 @@ async function register(args) {
token: await _createToken(opts),
})
}
exports.register = register
COMMANDS.register = register
function unregister() {
return config.unset(['server', 'token'])
}
exports.unregister = unregister
COMMANDS.unregister = unregister
async function listCommands(args) {
const xo = await connect()
@@ -350,7 +352,7 @@ async function listCommands(args) {
})
return str.join('')
}
exports.listCommands = listCommands
COMMANDS.listCommands = listCommands
async function listObjects(args) {
const properties = getKeys(extractFlags(args))
@@ -374,7 +376,7 @@ async function listObjects(args) {
}
stdout.write(']\n')
}
exports.listObjects = listObjects
COMMANDS.listObjects = listObjects
function ensurePathParam(method, value) {
if (typeof value !== 'string') {
@@ -454,10 +456,8 @@ async function call(args) {
return result
}
exports.call = call
COMMANDS.call = call
// ===================================================================
if (!module.parent) {
require('exec-promise')(exports)
}
execPromise(main)
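
Commands are now registered on the null-prototyped COMMANDS object rather than on module.exports, and main() dispatches kebab-case CLI names to their camelCase entries; a hypothetical extra command would follow the same pattern (the API method name is an assumption):

// sketch: `xo-cli who-am-i` would be dispatched to COMMANDS.whoAmI by main()
COMMANDS.whoAmI = async function whoAmI() {
  const xo = await connect() // connect() already exists in index.mjs
  return JSON.stringify(await xo.call('session.getUser'), null, 2) // assumed xo-server API method
}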

View File

@@ -22,12 +22,11 @@
"url": "https://vates.fr"
},
"preferGlobal": true,
"main": "dist/",
"bin": {
"xo-cli": "dist/index.js"
"xo-cli": "./index.mjs"
},
"engines": {
"node": ">=14"
"node": ">=14.13"
},
"dependencies": {
"chalk": "^4.1.0",
@@ -45,20 +44,7 @@
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -89,27 +89,6 @@ listingDebounce = '1 min'
vhdDirectoryCompression = 'brotli'
[backups.defaultSettings]
reportWhen = 'failure'
[backups.metadata.defaultSettings]
retentionPoolMetadata = 0
retentionXoMetadata = 0
[backups.vm.defaultSettings]
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
copyRetention = 0
deleteFirst = false
exportRetention = 0
fullInterval = 0
offlineBackup = false
offlineSnapshot = false
snapshotRetention = 0
timeout = 0
vmTimeout = 0
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674

View File

@@ -20,7 +20,7 @@ Cookie: authenticationToken=TN2YBOMYtXB_hHtf4wTzm9p5tTuqq2i15yeuhcz2xXM
The server will respond to an invalid token with a `401 Unauthorized` status.
The server can request that the client updates its token with a `Set-Cookie` header:
**[Not implemented at this time]** The server can request that the client update its token with a `Set-Cookie` header:
```http
HTTP/1.1 200 OK

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.93.0",
"version": "5.93.1",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -39,17 +39,17 @@
"@vates/predicates": "^1.0.0",
"@vates/read-chunk": "^0.1.2",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.22.0",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.1",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.3.1",
"@xen-orchestra/mixins": "^0.4.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/template": "^0.1.0",
"@xen-orchestra/xapi": "^0.11.0",
"@xen-orchestra/xapi": "^1.0.0",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.0.1",

View File

@@ -116,6 +116,7 @@ listMissingPatches.resolve = {
// -------------------------------------------------------------------
export async function installPatches({ pool, patches, hosts }) {
const opts = { patches }
let xapi
if (pool !== undefined) {
pool = this.getXapiObject(pool, 'pool')
@@ -123,6 +124,7 @@ export async function installPatches({ pool, patches, hosts }) {
hosts = Object.values(xapi.objects.indexes.type.host)
} else {
hosts = hosts.map(_ => this.getXapiObject(_))
opts.hosts = hosts
xapi = hosts[0].$xapi
pool = xapi.pool
}
@@ -136,7 +138,7 @@ export async function installPatches({ pool, patches, hosts }) {
})
}
await xapi.installPatches({ hosts, patches })
await xapi.installPatches(opts)
const masterRef = pool.master
if (moveFirst(hosts, _ => _.$ref === masterRef)) {

View File

@@ -62,17 +62,6 @@ export async function copyVm({ vm, sr }) {
console.log('import full VM...')
await tgtXapi.VM_destroy((await tgtXapi.importVm(input, { srId: sr })).$ref)
}
// delta
{
console.log('export delta VM...')
const input = await srcXapi.exportDeltaVm(vm)
console.log('import delta VM...')
const { vm: copyVm } = await tgtXapi.importDeltaVm(input, {
srId: sr,
})
await tgtXapi.VM_destroy(copyVm.$ref)
}
}
copyVm.description = 'export/import full/delta VM'

View File

@@ -111,21 +111,29 @@ export async function set({
await xapi.deleteVif(vif._xapiId)
// create new VIF with new parameters
const newVif = await xapi.createVif(vm.$id, network.$id, {
mac,
currently_attached: attached,
ipv4_allowed: newIpv4Addresses,
ipv6_allowed: newIpv6Addresses,
// - If locking mode has explicitly passed: use it
// - Else if the network is changing: config it to 'network_default'
// - Else: use the old locking mode
locking_mode: lockingMode ?? (isNetworkChanged ? 'network_default' : vif.lockingMode),
qos_algorithm_type: rateLimit != null ? 'ratelimit' : undefined,
qos_algorithm_params: rateLimit != null ? { kbps: String(rateLimit) } : undefined,
other_config: {
'ethtool-tx': txChecksumming !== undefined ? String(txChecksumming) : undefined,
},
})
const newVif = await xapi._getOrWaitObject(
await xapi.VIF_create(
{
currently_attached: attached,
ipv4_allowed: newIpv4Addresses,
ipv6_allowed: newIpv6Addresses,
// - If a locking mode has been explicitly passed: use it
// - Else, if the network is changing: set it to 'network_default'
// - Else: use the old locking mode
locking_mode: lockingMode ?? (isNetworkChanged ? 'network_default' : vif.lockingMode),
qos_algorithm_type: rateLimit != null ? 'ratelimit' : undefined,
qos_algorithm_params: rateLimit != null ? { kbps: String(rateLimit) } : undefined,
network: network.$ref,
other_config: {
'ethtool-tx': txChecksumming !== undefined ? String(txChecksumming) : undefined,
},
VM: vm.$ref,
},
{
MAC: mac,
}
)
)
await this.allocIpAddresses(newVif.$id, newIpAddresses)
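
For reference, the shape of the new low-level call (refs and addresses below are placeholders): the first argument is the VIF record, with network and VM refs plus any optional fields, and the second argument carries creation-only parameters such as MAC, which may be omitted as in the proxy deployment change further down.

// sketch: create a VIF and resolve the returned ref to an object
const vifRef = await xapi.VIF_create(
  {
    network: network.$ref, // required
    VM: vm.$ref, // required
    ipv4_allowed: ['192.0.2.10'], // optional record field
  },
  { MAC: '6e:2b:00:00:00:01' } // optional, second argument
)
const vif = await xapi._getOrWaitObject(vifRef)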

View File

@@ -1217,12 +1217,21 @@ export async function createInterface({ vm, network, position, mac, allowedIpv4A
}
let ipAddresses
const vif = await this.getXapi(vm).createVif(vm._xapiId, network._xapiId, {
mac,
position,
ipv4_allowed: allowedIpv4Addresses,
ipv6_allowed: allowedIpv6Addresses,
})
const xapi = this.getXapi(vm)
const vif = await xapi._getOrWaitObject(
await xapi.VIF_create(
{
device: position !== undefined ? String(position) : undefined,
ipv4_allowed: allowedIpv4Addresses,
ipv6_allowed: allowedIpv6Addresses,
network: network._xapiRef,
VM: vm._xapiRef,
},
{
MAC: mac,
}
)
)
const { push } = (ipAddresses = [])
if (allowedIpv4Addresses) {

View File

@@ -924,7 +924,14 @@ async function _prepareGlusterVm(
if (error.code === 'MESSAGE_METHOD_UNKNOWN') {
// VIF.move has been introduced in xenserver 7.0
await xapi.deleteVif(firstVif.$id)
await xapi.createVif(newVM.$id, xosanNetwork.$id, firstVif)
await xapi.VIF_create(
{
...firstVif,
VM: newVM.$ref,
network: xosanNetwork.$ref,
},
firstVif
)
}
}
}

View File

@@ -6,7 +6,6 @@ import filter from 'lodash/filter.js'
import find from 'lodash/find.js'
import flatMap from 'lodash/flatMap.js'
import flatten from 'lodash/flatten.js'
import groupBy from 'lodash/groupBy.js'
import identity from 'lodash/identity.js'
import includes from 'lodash/includes.js'
import isEmpty from 'lodash/isEmpty.js'
@@ -14,9 +13,7 @@ import mapToArray from 'lodash/map.js'
import mixin from '@xen-orchestra/mixin/legacy.js'
import ms from 'ms'
import noop from 'lodash/noop.js'
import omit from 'lodash/omit.js'
import once from 'lodash/once.js'
import semver from 'semver'
import tarStream from 'tar-stream'
import uniq from 'lodash/uniq.js'
import { asyncMap } from '@xen-orchestra/async-map'
@@ -33,9 +30,7 @@ import { Xapi as XapiBase } from '@xen-orchestra/xapi'
import { Ref } from 'xen-api'
import { synchronized } from 'decorator-synchronized'
import ensureArray from '../_ensureArray.mjs'
import fatfsBuffer, { init as fatfsBufferInit } from '../fatfs-buffer.mjs'
import { asyncMapValues } from '../_asyncMapValues.mjs'
import { camelToSnakeCase, forEach, map, parseSize, pDelay, promisifyAll } from '../utils.mjs'
import mixins from './mixins/index.mjs'
@@ -65,11 +60,6 @@ class AggregateError extends Error {
// ===================================================================
const TAG_BASE_DELTA = 'xo:base_delta'
export const TAG_COPY_SRC = 'xo:copy_of'
// ===================================================================
export * from './utils.mjs'
// VDI formats. (Raw is not available for delta vdi.)
@@ -593,352 +583,6 @@ export default class Xapi extends XapiBase {
return writeStream
}
// Create a snapshot (if necessary) of the VM and return a delta export
// object.
@cancelable
@decorateWith(deferrable)
async exportDeltaVm(
$defer,
$cancelToken,
vmId,
baseVmId,
{
bypassVdiChainsCheck = false,
// Contains a vdi.$id set of vmId.
fullVdisRequired = [],
disableBaseTags = false,
snapshotNameLabel = undefined,
} = {}
) {
let vm = this.getObject(vmId)
// do not use the snapshot name in the delta export
const exportedNameLabel = vm.name_label
if (!vm.is_a_snapshot) {
if (!bypassVdiChainsCheck) {
await this.VM_assertHealthyVdiChains(vm.$ref)
}
vm = await this.getRecord(
'VM',
await this.VM_snapshot(vm.$ref, { cancelToken: $cancelToken, name_label: snapshotNameLabel })
)
$defer.onFailure(() => this.VM_destroy(vm.$ref))
}
const baseVm = baseVmId && this.getObject(baseVmId)
// refs of VM's VDIs → base's VDIs.
const baseVdis = {}
baseVm &&
forEach(baseVm.$VBDs, vbd => {
let vdi, snapshotOf
if (
(vdi = vbd.$VDI) &&
(snapshotOf = vdi.$snapshot_of) &&
!find(fullVdisRequired, id => snapshotOf.$id === id)
) {
baseVdis[vdi.snapshot_of] = vdi
}
})
const streams = {}
const vdis = {}
const vbds = {}
forEach(vm.$VBDs, vbd => {
let vdi
if (vbd.type !== 'Disk' || !(vdi = vbd.$VDI)) {
// Ignore this VBD.
return
}
// If the VDI name starts with `[NOBAK]`, do not export it.
if (vdi.name_label.startsWith('[NOBAK]')) {
// FIXME: find a way to not create the VDI snapshot in the
// first place.
//
// The snapshot must not exist otherwise it could break the
// next export.
vdi.$destroy()::ignoreErrors()
return
}
vbds[vbd.$ref] = vbd
const vdiRef = vdi.$ref
if (vdiRef in vdis) {
// This VDI has already been managed.
return
}
// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]
vdis[vdiRef] = {
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
},
$SR$uuid: vdi.$SR.uuid,
}
streams[`${vdiRef}.vhd`] = () => this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
})
const suspendVdi = vm.$suspend_VDI
if (suspendVdi !== undefined) {
const vdiRef = suspendVdi.$ref
vdis[vdiRef] = {
...suspendVdi,
$SR$uuid: suspendVdi.$SR.uuid,
}
streams[`${vdiRef}.vhd`] = () => this._exportVdi($cancelToken, suspendVdi, undefined, VDI_FORMAT_VHD)
}
const vifs = {}
forEach(vm.$VIFs, vif => {
const network = vif.$network
vifs[vif.$ref] = {
...vif,
$network$uuid: network.uuid,
$network$name_label: network.name_label,
// https://github.com/babel/babel-eslint/issues/595
// eslint-disable-next-line no-undef
$network$VLAN: network.$PIFs[0]?.VLAN,
}
})
return Object.defineProperty(
{
version: '1.1.0',
vbds,
vdis,
vifs,
vm: {
...vm,
name_label: exportedNameLabel,
other_config:
baseVm && !disableBaseTags
? {
...vm.other_config,
[TAG_BASE_DELTA]: baseVm.uuid,
}
: omit(vm.other_config, TAG_BASE_DELTA),
},
},
'streams',
{
configurable: true,
value: streams,
writable: true,
}
)
}
@decorateWith(deferrable)
async importDeltaVm(
$defer,
delta,
{
deleteBase = false,
detectBase = true,
disableStartAfterImport = true,
mapVdisSrs = {},
name_label = delta.vm.name_label,
srId = this.pool.default_SR,
} = {}
) {
const { version } = delta
if (!semver.satisfies(version, '^1')) {
throw new Error(`Unsupported delta backup version: ${version}`)
}
let baseVm
if (detectBase) {
const remoteBaseVmUuid = delta.vm.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(this.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}
const baseVdis = {}
baseVm &&
forEach(baseVm.$VBDs, vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
// 0. Create suspend_VDI
let suspendVdi
if (delta.vm.power_state === 'Suspended') {
const vdi = delta.vdis[delta.vm.suspend_VDI]
suspendVdi = await this.createVdi({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrs[vdi.uuid] || srId,
})
$defer.onFailure.call(this, 'VDI_destroy', suspendVdi.$ref)
}
// 1. Create the VMs.
const vm = await this._getOrWaitObject(
await this._createVmRecord(
{
...delta.vm,
affinity: null,
blocked_operations: {
...delta.vm.blocked_operations,
start: 'Importing…',
start_on: 'Importing…',
},
ha_always_run: false,
is_a_template: false,
name_label: `[Importing…] ${name_label}`,
other_config: {
...delta.vm.other_config,
[TAG_COPY_SRC]: delta.vm.uuid,
},
},
{ suspend_VDI: suspendVdi?.$ref }
)
)
$defer.onFailure(() => this.VM_destroy(vm.$ref))
// 2. Delete all VBDs which may have been created by the import.
await asyncMapSettled(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
// 3. Create VDIs & VBDs.
//
// TODO: move all VDIs creation before the VM and simplify the code
const vbds = groupBy(delta.vbds, 'VDI')
const newVdis = await asyncMapValues(delta.vdis, async (vdi, vdiRef) => {
let newVdi
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
} else if (vdiRef === delta.vm.suspend_VDI) {
// suspend VDI has already been created
newVdi = suspendVdi
} else {
newVdi = await this.createVdi({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrs[vdi.uuid] || srId,
})
$defer.onFailure(() => newVdi.$destroy())
}
await asyncMapSettled(vbds[vdiRef], vbd =>
this.createVbd({
...vbd,
vdi: newVdi,
vm,
})
)
return newVdi
})
const networksByNameLabelByVlan = {}
let defaultNetwork
forEach(this.objects.all, object => {
if (object.$type === 'network') {
const pif = object.$PIFs[0]
if (pif === undefined) {
// ignore network
return
}
const vlan = pif.VLAN
const networksByNameLabel = networksByNameLabelByVlan[vlan] || (networksByNameLabelByVlan[vlan] = {})
defaultNetwork = networksByNameLabel[object.name_label] = object
}
})
const { streams } = delta
await Promise.all([
// Import VDI contents.
asyncMapSettled(newVdis, async (vdi, id) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
if (typeof stream === 'function') {
stream = await stream()
}
await this._importVdiContent(vdi, stream, VDI_FORMAT_VHD)
}
}),
// Wait for VDI export tasks (if any) termination.
asyncMapSettled(streams, stream => stream.task),
// Create VIFs.
asyncMapSettled(delta.vifs, vif => {
let network = vif.$network$uuid && this.getObject(vif.$network$uuid, undefined)
if (network === undefined) {
const { $network$VLAN: vlan = -1 } = vif
const networksByNameLabel = networksByNameLabelByVlan[vlan]
if (networksByNameLabel !== undefined) {
network = networksByNameLabel[vif.$network$name_label]
if (network === undefined) {
network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
}
} else {
network = defaultNetwork
}
}
if (network) {
return this._createVif(vm, network, vif)
}
}),
])
if (deleteBase && baseVm) {
this.VM_destroy(baseVm.$ref)::ignoreErrors()
}
await Promise.all([
delta.vm.ha_always_run && vm.set_ha_always_run(true),
vm.set_name_label(name_label),
// FIXME: move
asyncMap(['start', 'start_on'], op =>
vm.update_blocked_operations(
op,
disableStartAfterImport ? 'Do not start this VM, clone it if you want to use it.' : null
)
),
])
return { vm }
}
async _migrateVmWithStorageMotion(
vm,
hostXapi,
@@ -1204,8 +848,10 @@ export default class Xapi extends XapiBase {
})
}).concat(
map(networks, (networkId, i) =>
this._createVif(vm, this.getObject(networkId), {
this.VIF_create({
device: vifDevices[i],
network: this.getObject(networkId).$ref,
VM: vm.$ref,
})
)
)
@@ -1801,61 +1447,6 @@ export default class Xapi extends XapiBase {
// =================================================================
async _createVif(
vm,
network,
{
mac = '',
position = undefined,
currently_attached = true,
device = position != null ? String(position) : undefined,
ipv4_allowed = undefined,
ipv6_allowed = undefined,
locking_mode = undefined,
MAC = mac,
other_config = {},
qos_algorithm_params = {},
qos_algorithm_type = '',
} = {}
) {
log.debug(`Creating VIF for VM ${vm.name_label} on network ${network.name_label}`)
if (device == null) {
device = (await this.call('VM.get_allowed_VIF_devices', vm.$ref))[0]
}
const vifRef = await this.call(
'VIF.create',
filterUndefineds({
currently_attached: vm.power_state === 'Suspended' ? currently_attached : undefined,
device,
ipv4_allowed,
ipv6_allowed,
locking_mode,
MAC,
MTU: asInteger(network.MTU),
network: network.$ref,
other_config,
qos_algorithm_params,
qos_algorithm_type,
VM: vm.$ref,
})
)
if (currently_attached && isVmRunning(vm)) {
await this.callAsync('VIF.plug', vifRef)
}
return vifRef
}
async createVif(vmId, networkId, opts = undefined) {
return /* await */ this._getOrWaitObject(
await this._createVif(this.getObject(vmId), this.getObject(networkId), opts)
)
}
@decorateWith(deferrable)
async createNetwork($defer, { name, description = 'Created with Xen Orchestra', pifId, mtu, vlan }) {
const networkRef = await this.call('network.create', {

View File

@@ -190,14 +190,20 @@ export default {
const devices = await this.call('VM.get_allowed_VIF_devices', vm.$ref)
await Promise.all(
mapToArray(vifs, (vif, index) =>
this._createVif(vm, this.getObject(vif.network), {
ipv4_allowed: vif.ipv4_allowed,
ipv6_allowed: vif.ipv6_allowed,
device: devices[index],
locking_mode: isEmpty(vif.ipv4_allowed) && isEmpty(vif.ipv6_allowed) ? 'network_default' : 'locked',
mac: vif.mac,
mtu: vif.mtu,
})
this.VIF_create(
{
ipv4_allowed: vif.ipv4_allowed,
ipv6_allowed: vif.ipv6_allowed,
device: devices[index],
locking_mode: isEmpty(vif.ipv4_allowed) && isEmpty(vif.ipv6_allowed) ? 'network_default' : 'locked',
MTU: vif.mtu,
network: this.getObject(vif.network).$ref,
VM: vm.$ref,
},
{
MAC: vif.mac,
}
)
)
)
}

View File

@@ -164,15 +164,18 @@ export default class {
// -----------------------------------------------------------------
async createAuthenticationToken({ expiresIn, userId }) {
let duration = this._defaultTokenValidity
if (expiresIn !== undefined) {
duration = parseDuration(expiresIn)
if (duration > this._maxTokenValidity) {
throw new Error('too high expiresIn duration: ' + expiresIn)
}
}
const token = new Token({
id: await generateToken(),
user_id: userId,
expiration:
Date.now() +
Math.min(
expiresIn !== undefined ? parseDuration(expiresIn) : this._defaultTokenValidity,
this._maxTokenValidity
),
expiration: Date.now() + duration,
})
await this._tokens.add(token)
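
In practice (the durations and limit below are illustrative, not configuration defaults), the requested duration is now rejected up front instead of being silently clamped to _maxTokenValidity as before; `app` stands for the xo application instance carrying this mixin.

// sketch: assuming the configured max token validity is 30 days
await app.createAuthenticationToken({ userId, expiresIn: '7 days' })
// → token expiring 7 days from now

await app.createAuthenticationToken({ userId, expiresIn: '100 days' })
// → throws Error('too high expiresIn duration: 100 days')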

View File

@@ -81,7 +81,7 @@ export default class BackupNg {
// different than the VMs in the job itself.
let vmIds = data?.vms ?? extractIdsFromSimplePattern(vmsPattern)
await this.checkAuthorizations({ job, schedule, useSmartBackup: vmIds === undefined })
await this._checkAuthorizations({ job, schedule, useSmartBackup: vmIds === undefined })
if (vmIds === undefined) {
const poolPattern = vmsPattern.$pool
@@ -301,7 +301,7 @@ export default class BackupNg {
return job
}
async checkAuthorizations({ job, useSmartBackup, schedule }) {
async _checkAuthorizations({ job, useSmartBackup, schedule }) {
const { _app: app } = this
if (job.type === 'metadataBackup') {
@@ -327,12 +327,15 @@ export default class BackupNg {
// this won't check per-VM settings
const config = app.config.get('backups')
// FIXME: does not take into account default values defined in @xen-orchestra/backups/Backup
const jobSettings = {
...config.defaultSettings,
...config.vm.defaultSettings,
...config.vm?.defaultSettings,
...job.settings[''],
...job.settings[schedule.id],
}
if (jobSettings.checkpointSnapshot === true) {
await app.checkFeatureAuthorization('BACKUP.WITH_RAM')
}

View File

@@ -226,7 +226,10 @@ export default class Proxy {
$defer.onFailure(() => app.unbindLicense(arg))
if (networkId !== undefined) {
await Promise.all([...vm.VIFs.map(vif => xapi.deleteVif(vif)), xapi.createVif(vm.$id, networkId)])
await Promise.all([
...vm.VIFs.map(vif => xapi.deleteVif(vif)),
xapi.VIF_create({ network: xapi.getObject(networkId).$ref, VM: vm.$ref }),
])
}
const date = new Date()

View File

@@ -50,7 +50,11 @@ const valueEncoding = {
export default class {
constructor(app) {
const dir = `${app.config.get('datadir')}/leveldb`
this._db = fse.ensureDir(dir).then(() => levelup(dir))
this._db = (async () => {
await fse.ensureDir(dir)
await fse.access(dir, fse.constants.R_OK | fse.constants.W_OK)
return levelup(dir)
})()
}
async getStore(namespace) {

View File

@@ -1,7 +1,7 @@
import Config from '@xen-orchestra/mixins/Config.js'
import Config from '@xen-orchestra/mixins/Config.mjs'
import forEach from 'lodash/forEach.js'
import Hooks from '@xen-orchestra/mixins/Hooks.js'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.js'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import includes from 'lodash/includes.js'
import isEmpty from 'lodash/isEmpty.js'
import iteratee from 'lodash/iteratee.js'

View File

@@ -125,8 +125,10 @@ export default decorate([
type,
}
if (type === 's3') {
const { bucket, directory } = state
const { allowUnauthorized, bucket, directory, protocol = 'https' } = state
urlParams.path = bucket + '/' + directory
urlParams.allowUnauthorized = allowUnauthorized
urlParams.protocol = protocol
}
username && (urlParams.username = username)
password && (urlParams.password = password)

View File

@@ -53,6 +53,7 @@ const FILTER_TYPE_TO_LABEL_ID = {
pool: 'homeTypePool',
VM: 'homeTypeVm',
'VM-template': 'homeTypeVmTemplate',
SR: 'homeTypeSr',
}
const SSH_KEY_STYLE = { wordWrap: 'anywhere' }

yarn.lock (2751 changed lines)

File diff suppressed because it is too large