Compare commits

..

1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Thierry Goettelmann | 135dda5290 | XO E2E testing | 2022-05-25 10:37:43 +02:00 |
276 changed files with 19969 additions and 23314 deletions

View File

@@ -4,6 +4,7 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**

.gitignore vendored
View File

@@ -10,6 +10,8 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat

View File

@@ -14,7 +14,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop the iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
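
For reference, a minimal usage sketch combining these options; `doSomething` is a hypothetical async function:

```js
import { asyncEach } from '@vates/async-each'

const ac = new AbortController()

// process up to 2 items in parallel and collect all errors
// instead of stopping at the first one
await asyncEach(
  ['foo', 'bar', 'baz'],
  async item => {
    await doSomething(item)
  },
  { concurrency: 2, signal: ac.signal, stopOnError: false }
)
```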

View File

@@ -32,7 +32,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop the iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

View File

@@ -9,16 +9,7 @@ class AggregateError extends Error {
}
}
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []

View File

@@ -36,7 +36,7 @@ describe('asyncEach', () => {
it('works', async () => {
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
await asyncEach.call(thisArg, iterable, iteratee)
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
@@ -66,7 +66,7 @@ describe('asyncEach', () => {
}
})
expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
@@ -91,9 +91,7 @@ describe('asyncEach', () => {
}
})
await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
'asyncEach aborted'
)
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
})
})

View File

@@ -24,7 +24,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},

View File

@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
}
add(type, listener) {
let listeners = this._listeners.get(type)
let listeners = this._listeners[type]
if (listeners === undefined) {
listeners = new Set()
this._listeners.set(type, listeners)
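Pieced together from the class above and its tests below, a minimal usage sketch (the package name is an assumption based on the `@vates` scope used elsewhere in this repository):

```js
const { EventEmitter } = require('events')
// assumed package name
const { EventListenersManager } = require('@vates/event-listeners-manager')

const ee = new EventEmitter()
const em = new EventListenersManager(ee)

// add() is chainable and does not register the same listener twice
em.add('data', console.log).add('error', console.error)

// detach everything this manager added, without touching listeners
// registered directly on the emitter
em.removeAll()
```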

View File

@@ -1,67 +0,0 @@
'use strict'
const t = require('tap')
const { EventEmitter } = require('events')
const { EventListenersManager } = require('./')
const noop = Function.prototype
// function spy (impl = Function.prototype) {
// function spy() {
// spy.calls.push([Array.from(arguments), this])
// }
// spy.calls = []
// return spy
// }
function assertListeners(t, event, listeners) {
t.strictSame(t.context.ee.listeners(event), listeners)
}
t.beforeEach(function (t) {
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})
t.test('.add adds a listener', function (t) {
t.context.em.add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.add does not add a duplicate listener', function (t) {
t.context.em.add('foo', noop).add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.remove removes a listener', function (t) {
t.context.em.add('foo', noop).remove('foo', noop)
assertListeners(t, 'foo', [])
t.end()
})
t.test('.removeAll removes all listeners of a given type', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [noop])
t.end()
})
t.test('.removeAll removes all listeners', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll()
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [])
t.end()
})

View File

@@ -35,12 +35,8 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.1",
"version": "1.0.0",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^16.2.0"
"postversion": "npm publish --access public"
}
}

View File

@@ -1,9 +1,6 @@
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
@@ -14,13 +11,3 @@ import { readChunk } from '@vates/read-chunk'
}
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
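
As the implementation further below shows, the "not enough data" error carries the partial data on an `error.chunk` property; a sketch of handling both failure modes:

```js
import { readChunkStrict } from '@vates/read-chunk'

try {
  const chunk = await readChunkStrict(stream, 1024)
  // use chunk
} catch (error) {
  if (error.chunk !== undefined) {
    // the stream ended with some data, but less than requested
    console.warn('partial read of', error.chunk.length, 'bytes')
  } else {
    // the stream ended without any data
    throw error
  }
}
```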

View File

@@ -16,12 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
## Usage
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
- returns `null` if the stream has ended
```js
import { readChunk } from '@vates/read-chunk'
@@ -33,16 +30,6 @@ import { readChunk } from '@vates/read-chunk'
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
## Contributions
Contributions are _very_ welcome, either on the documentation or on

View File

@@ -30,22 +30,3 @@ const readChunk = (stream, size) =>
onReadable()
})
exports.readChunk = readChunk
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}

View File

@@ -4,7 +4,7 @@
const { Readable } = require('stream')
const { readChunk, readChunkStrict } = require('./')
const { readChunk } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
@@ -43,27 +43,3 @@ describe('readChunk', () => {
})
})
})
const rejectionOf = promise =>
promise.then(
value => {
throw value
},
error => error
)
describe('readChunkStrict', function () {
it('throws if stream is empty', async () => {
const error = await rejectionOf(readChunkStrict(makeStream([])))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended without data')
expect(error.chunk).toEqual(undefined)
})
it('throws if stream ends with not enough data', async () => {
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
expect(error).toBeInstanceOf(Error)
expect(error.message).toBe('stream has ended with not enough data')
expect(error.chunk).toEqual(Buffer.from('foobar'))
})
})

View File

@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "1.0.0",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},

View File

@@ -26,13 +26,7 @@ module.exports = async function main(args) {
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}

View File

@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.7",
"version": "0.7.1",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
@@ -36,8 +36,6 @@ const DEFAULT_VM_SETTINGS = {
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
healthCheckSr: undefined,
healthCheckVmsWithTags: [],
maxMergedDeltasPerRun: 2,
offlineBackup: false,
offlineSnapshot: false,
@@ -217,7 +215,6 @@ exports.Backup = class Backup {
const schedule = this._schedule
const config = this._config
const settings = this._settings
await Disposable.use(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
@@ -245,14 +242,15 @@ exports.Backup = class Backup {
})
)
),
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
async (srs, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
// remove srs that failed (already handled)
srs = srs.filter(_ => _ !== undefined)
const settings = this._settings
if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
return
}
@@ -273,7 +271,6 @@ exports.Backup = class Backup {
baseSettings,
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
schedule,

View File

@@ -1,64 +0,0 @@
'use strict'
const { Task } = require('./Task')
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
#xapi
#restoredVm
constructor({ restoredVm, xapi }) {
this.#restoredVm = restoredVm
this.#xapi = xapi
}
async run() {
return Task.run(
{
name: 'vmstart',
},
async () => {
let restoredVm = this.#restoredVm
const xapi = this.#xapi
const restoredId = restoredVm.uuid
// remove vifs
await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
const start = new Date()
// start Vm
await xapi.callAsync(
'VM.start',
restoredVm.$ref,
false, // Start paused?
false // Skip pre-boot checks?
)
const started = new Date()
const timeout = 10 * 60 * 1000
const startDuration = started - start
let remainingTimeout = timeout - startDuration
if (remainingTimeout < 0) {
throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
}
// wait for the 'Running' event to be really stored in local xapi object cache
restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
timeout: remainingTimeout,
})
const running = new Date()
remainingTimeout -= running - started
if (remainingTimeout < 0) {
throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
}
// wait for the guest tool version to be defined
await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
timeout: remainingTimeout,
})
}
)
}
}

View File

@@ -15,7 +15,7 @@ const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
@@ -47,12 +47,13 @@ const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
@@ -279,7 +280,7 @@ class RemoteAdapter {
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, logWarn: warn })
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
@@ -291,7 +292,7 @@ class RemoteAdapter {
}
#useVhdDirectory() {
return this.handler.useVhdDirectory()
return this.handler.type === 's3'
}
#useAlias() {
@@ -382,12 +383,8 @@ class RemoteAdapter {
const entriesMap = {}
await asyncMap(await readdir(path), async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error

View File

@@ -3,10 +3,8 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
const logAfterEnd = () => {
throw new Error('task has already ended')
}
const noop = Function.prototype

View File

@@ -45,18 +45,7 @@ const forkDeltaExport = deltaExport =>
})
class VmBackup {
constructor({
config,
getSnapshotNameLabel,
healthCheckSr,
job,
remoteAdapters,
remotes,
schedule,
settings,
srs,
vm,
}) {
constructor({ baseSettings, config, getSnapshotNameLabel, job, remoteAdapters, schedule, settings, srs, vm }) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
// don't match replicated VMs created by this very job otherwise they
// will be replicated again and again
@@ -79,7 +68,6 @@ class VmBackup {
this._fullVdisRequired = undefined
this._getSnapshotNameLabel = getSnapshotNameLabel
this._isDelta = job.mode === 'delta'
this._healthCheckSr = healthCheckSr
this._jobId = job.id
this._jobSnapshots = undefined
this._xapi = vm.$xapi
@@ -106,6 +94,7 @@ class VmBackup {
: [FullBackupWriter, FullReplicationWriter]
const allSettings = job.settings
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
@@ -128,49 +117,35 @@ class VmBackup {
}
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, step, parallel = true) {
async _callWriters(fn, warnMessage, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { duration: step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own sub tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
try {
await fn(writer)
} catch (error) {
writers.delete(writer)
throw error
}
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await callWriter(writer)
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
}
}
@@ -210,7 +185,6 @@ class VmBackup {
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
ignoreNobakVdis: true,
name_label: this._getSnapshotNameLabel(vm),
unplugVusbs: true,
})
this.timestamp = Date.now()
@@ -421,24 +395,6 @@ class VmBackup {
this._fullVdisRequired = fullVdisRequired
}
async _healthCheck() {
const settings = this._settings
if (this._healthCheckSr === undefined) {
return
}
// check if current VM has tags
const { tags } = this.vm
const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))
if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
return
}
await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
}
async run($defer) {
const settings = this._settings
assert(
@@ -448,9 +404,7 @@ class VmBackup {
await this._callWriters(async writer => {
await writer.beforeBackup()
$defer(async () => {
await writer.afterBackup()
})
$defer(() => writer.afterBackup())
}, 'writer.beforeBackup()')
await this._fetchJobSnapshots()
@@ -486,7 +440,6 @@ class VmBackup {
await this._fetchJobSnapshots()
await this._removeUnusedSnapshots()
}
await this._healthCheck()
}
}
exports.VmBackup = VmBackup

View File

@@ -35,7 +35,7 @@ afterEach(async () => {
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
async function generateVhd(path, opts = {}) {
let vhd
@@ -78,15 +78,15 @@ test('It remove broken vhd', async () => {
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
expect(loggued).toEqual(`VHD check error`)
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
})
@@ -118,13 +118,15 @@ test('it remove vhd with missing or multiple ancestors', async () => {
)
// clean
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
await adapter.cleanVm('/', { remove: true, onLog })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
@@ -157,12 +159,14 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
})
let loggued = ''
const logInfo = message => {
const onLog = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
@@ -179,8 +183,8 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
@@ -216,16 +220,16 @@ test('it merges delta of non destroyed chain', async () => {
})
let loggued = []
const logInfo = message => {
const onLog = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
const [merging] = loggued
expect(merging).toEqual(`merging VHD chain`)
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
@@ -271,7 +275,7 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these vhds are as empty as my stomach at 12h12)
// only check deletion
@@ -378,7 +382,7 @@ describe('tests multiple combination ', () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true, merge: true })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
@@ -414,7 +418,7 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
await adapter.cleanVm('/', { remove: true })
expect(await handler.list(basePath)).toEqual([])
})
@@ -429,11 +433,7 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))

View File

@@ -1,27 +1,22 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check the size if all the VHDs are VhdFiles
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
function shouldComputeVhdsSize(vhds) {
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -29,48 +24,73 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(handler, vhds)) {
if (shouldComputeVhdsSize(vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
// chain is [ ancestor, child1, ..., childn]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, childn ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
if (merge) {
logInfo(`merging VHD chain`, { chain })
onLog(`merging ${children.length} children into ${parent}`)
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
}
}, 10e3)
try {
return await mergeVhdChain(handler, chain, {
logInfo,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
onLog(`the VHD ${child} is already merged`)
if (remove) {
onLog(`deleting merged VHD ${child}`)
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir, logWarn) => {
const listVhds = async (handler, vmDir) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -90,23 +110,12 @@ const listVhds = async (handler, vmDir, logWarn) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
await asyncMap(list, async file => {
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non-resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
@@ -116,21 +125,16 @@ const listVhds = async (handler, vmDir, logWarn) => {
return { vhds, interruptedVhds, aliases }
}
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
const aliasFound = []
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { alias, target })
onLog(`Alias ${path} references a non vhd target: ${target}`)
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(alias)
await handler.unlink(path)
}
continue
}
@@ -143,13 +147,13 @@ async function checkAliases(
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { alias, target, error })
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
if (remove) {
try {
await VhdAbstract.unlink(handler, alias)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
}
}
}
@@ -159,45 +163,42 @@ async function checkAliases(
aliasFound.push(resolve('/', target))
}
const vhds = await handler.list(targetDataRepository, {
const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the Vhd ${entry} is not referenced by an alias`)
if (remove) {
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
await VhdAbstract.unlink(handler, entry)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
) {
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhds, async path => {
try {
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), async vhd => {
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), vhd => {
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
@@ -210,32 +211,12 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
await vhd.check()
})
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken VHD', { path })
onLog(`deleting broken ${path}`)
return VhdAbstract.unlink(handler, path)
}
}
@@ -244,15 +225,15 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const { statePath } = interruptedVhds.get(interruptedVhd)
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
onLog('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
logInfo('deleting orphan merge state', { statePath })
onLog(`deleting orphan merge state ${statePath}`)
await handler.unlink(statePath)
}
}
@@ -261,7 +242,7 @@ exports.cleanVm = async function cleanVm(
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})
// remove VHDs with missing ancestors
@@ -283,9 +264,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
logWarn('parent VHD is missing', { parent, child: vhdPath })
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
if (remove) {
logInfo('deleting orphan VHD', { path: vhdPath })
onLog(`deleting orphan VHD ${vhdPath}`)
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -322,7 +303,7 @@ exports.cleanVm = async function cleanVm(
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await this.isValidXva(path))) {
logWarn('XVA might be broken', { path })
onLog(`the XVA with path ${path} is potentially broken`)
}
})
@@ -336,7 +317,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read backup metadata', { path: json, error })
onLog(`failed to read metadata file ${json}`, { error })
jsons.delete(json)
return
}
@@ -347,9 +328,9 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
logInfo('deleting incomplete backup', { path: json })
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
@@ -370,9 +351,9 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { path: json })
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
@@ -384,7 +365,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -408,14 +389,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.unshift(vhd)
chain.push(vhd)
return chain
}
}
logWarn('unused VHD', { path: vhd })
onLog(`the VHD ${vhd} is unused`)
if (remove) {
logInfo('deleting unused VHD', { path: vhd })
onLog(`deleting unused VHD ${vhd}`)
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -426,13 +407,7 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -445,9 +420,9 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge })
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
const metadataPath = vhdsToJSons[chain[0]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -457,18 +432,18 @@ exports.cleanVm = async function cleanVm(
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
logWarn('unused XVA', { path })
onLog(`the XVA ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA', { path })
onLog(`deleting unused XVA ${path}`)
return handler.unlink(path)
}
}),
asyncMap(xvaSums, path => {
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
logInfo('unused XVA checksum', { path })
onLog(`the XVA checksum ${path} is unused`)
if (remove) {
logInfo('deleting unused XVA checksum', { path })
onLog(`deleting unused XVA checksum ${path}`)
return handler.unlink(path)
}
}
@@ -490,11 +465,7 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -506,15 +477,11 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
}
}
} catch (error) {
logWarn('failed to get backup size', { backup: metadataPath, error })
onLog(`failed to get size of ${metadataPath}`, { error })
return
}
@@ -524,7 +491,7 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
}
}
})

View File

@@ -3,8 +3,6 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
@@ -13,23 +11,18 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: stream.forks })
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
proxy.destroy(error)
}
})
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
eos(proxy, _ => {
stream.forks--
stream.unpipe(proxy)
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
stream.destroy(new Error('no more consumers for this stream'))
}
})
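
A sketch of the intended usage, assuming a readable source stream and hypothetical destination paths:

```js
const { createReadStream, createWriteStream } = require('fs')
const { forkStreamUnpipe } = require('./forkStreamUnpipe.js')

const source = createReadStream('backup.xva')

// each consumer gets its own fork; an error on one fork only unpipes it,
// and the source is destroyed once no forks remain
forkStreamUnpipe(source).pipe(createWriteStream('/mnt/remote1/backup.xva'))
forkStreamUnpipe(source).pipe(createWriteStream('/mnt/remote2/backup.xva'))
```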

View File

@@ -49,11 +49,6 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find a heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// size is longer when encrypted + reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -71,6 +66,7 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -14,8 +14,6 @@
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
@@ -32,19 +30,6 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
└─ <uuid>.vhd
```
## Attributes
### Of created snapshots
@@ -84,8 +69,6 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string })
│ ├─ task.warning(message: string)
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
@@ -94,20 +77,16 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ │ // in case there is a healthcheck scheduled for this vm in this job
│ │ ├─ task.start(message: 'health check')
│ │ │ ├─ task.start(message: 'transfer')
│ │ │ │ └─ task.end(result: { size: number })
│ │ │ ├─ task.start(message: 'vmstart')
│ │ │ │ └─ task.end
│ │ │ └─ task.end
│ │ │
│ │ │ // in case of full backup, DR and CR
│ │ ├─ task.start(message: 'clean')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end
│ │ └─ task.end
| ├─ task.start(message: 'clean-vm')
│ │
│ │ │ // in case of delta backup
│ │ ├─ task.start(message: 'merge')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ └─ task.end
│ └─ task.end
└─ job.end
@@ -216,7 +195,6 @@ Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com
- `prepare({ isFull })`
- `transfer({ timestamp, deltaExport, sizeContainers })`
- `cleanup()`
- `healthCheck(sr)`
- **Full**
- `run({ timestamp, sizeContainer, stream })`
- `afterBackup()`

View File

@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.27.4",
"version": "0.23.0",
"engines": {
"node": ">=14.6"
},
@@ -22,7 +22,7 @@
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
@@ -38,7 +38,7 @@
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^8.3.2",
"vhd-lib": "^4.0.0",
"vhd-lib": "^3.1.0",
"yazl": "^2.5.1"
},
"devDependencies": {
@@ -46,7 +46,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.4.2"
"@xen-orchestra/xapi": "^1.0.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,8 +19,6 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
@@ -71,35 +69,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -111,9 +80,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
},
})
this.transfer = task.wrapFn(this.transfer)
this.healthCheck = task.wrapFn(this.healthCheck)
this.cleanup = task.wrapFn(this.cleanup)
this.afterBackup = task.wrapFn(this.afterBackup, true)
this.cleanup = task.wrapFn(this.cleanup, true)
return task.run(() => this._prepare())
}
@@ -189,7 +156,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataFilename = `${backupDir}/${basename}.json`
const metadataContent = {
jobId,
mode: job.mode,

View File

@@ -9,6 +9,4 @@ exports.AbstractWriter = class AbstractWriter {
beforeBackup() {}
afterBackup() {}
healthCheck(sr) {}
}

View File

@@ -6,9 +6,8 @@ const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')
const { Task } = require('../Task.js')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
const { warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
@@ -26,17 +25,11 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async _cleanVm(options) {
try {
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
logInfo: info,
logWarn: (message, data) => {
warn(message, data)
Task.warning(message, data)
},
lock: false,
})
return await this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
onLog: warn,
lock: false,
})
} catch (error) {
warn(error)

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.2"
"xen-api": "^1.2.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/emit-async",
"version": "1.0.0",
"version": "0.1.0",
"license": "ISC",
"description": "Emit an event for async listeners to settle",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",

View File

@@ -1,19 +0,0 @@
## metadata files
- Older remotes don't have any metadata file
- Remotes used since 5.75 have two files: `encryption.json` and `metadata.json`
The metadata files are checked by the `sync()` method. If the check fails, it MUST throw an error and dismount.
If the remote is empty, the `sync` method creates them.
### encryption.json
A non-encrypted file containing the algorithm and parameters used for this remote.
It MUST NOT contain the key.
### metadata.json
An encrypted JSON file containing the settings of a remote. Today this is an almost empty JSON file (`{random: <randomuuid>}`); it serves to check that the encryption key set on the remote is valid, but in the future it will be able to store some remote settings to ease disaster recovery.
If this file can't be read (decrypted, decompressed, …), that means the remote settings have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files, else raise an error.
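
As an illustration only (the exact fields are not specified in this document), an `encryption.json` for the algorithm used by `_encryptor.js` below might look like:

```json
{
  "algorithm": "aes-256-cbc"
}
```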

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "3.0.0",
"version": "1.0.1",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,18 +17,18 @@
"xo-fs": "./cli.js"
},
"engines": {
"node": ">=14.13"
"node": ">=14"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^1.0.0",
"@vates/async-each": "^0.1.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"bind-property-descriptor": "^2.0.0",
@@ -40,10 +40,9 @@
"lodash": "^4.17.4",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"pumpify": "^2.0.1",
"readable-stream": "^4.1.0",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"xo-remote-parser": "^0.9.1"
"xo-remote-parser": "^0.8.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -51,6 +50,7 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
@@ -68,9 +68,5 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"exports": {
".": "./dist/index.js",
"./path": "./dist/path.js"
}
}

View File

@@ -1,71 +0,0 @@
const { readChunk } = require('@vates/read-chunk')
const crypto = require('crypto')
const pumpify = require('pumpify')
function getEncryptor(key) {
if (key === undefined) {
return {
id: 'NULL_ENCRYPTOR',
algorithm: 'none',
key: 'none',
ivLength: 0,
encryptData: buffer => buffer,
encryptStream: stream => stream,
decryptData: buffer => buffer,
decryptStream: stream => stream,
}
}
const algorithm = 'aes-256-cbc'
const ivLength = 16
function encryptStream(input) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = pumpify(input, cipher)
encrypted.unshift(iv)
return encrypted
}
async function decryptStream(encryptedStream) {
const iv = await readChunk(encryptedStream, ivLength)
const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
/**
* WARNING
*
* the encrypted size includes an initialization vector + padding at the end
* we can't predict the decrypted size from the encrypted size alone
* thus, we can't set decrypted.length reliably
*
*/
return pumpify(encryptedStream, cipher)
}
function encryptData(buffer) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = cipher.update(buffer)
return Buffer.concat([iv, encrypted, cipher.final()])
}
function decryptData(buffer) {
const iv = buffer.slice(0, ivLength)
const encrypted = buffer.slice(ivLength)
const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
const decrypted = decipher.update(encrypted)
return Buffer.concat([decrypted, decipher.final()])
}
return {
id: algorithm,
algorithm,
key,
ivLength,
encryptData,
encryptStream,
decryptData,
decryptStream,
}
}
exports._getEncryptor = getEncryptor
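
A self-contained sketch exercising the buffer helpers above (aes-256-cbc requires a 32-byte key):

```js
const { randomBytes } = require('crypto')
const { _getEncryptor } = require('./_encryptor.js')

const key = randomBytes(32) // aes-256-cbc uses a 256-bit key
const { encryptData, decryptData } = _getEncryptor(key)

// the ciphertext is prefixed with the 16-byte IV, so it is longer than the input
const encrypted = encryptData(Buffer.from('secret'))
console.log(decryptData(encrypted).toString()) // → 'secret'
```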

View File

@@ -1,6 +1,6 @@
import path from 'path'
const { basename, dirname, join, resolve, relative, sep } = path.posix
const { basename, dirname, join, resolve, sep } = path.posix
export { basename, dirname, join }
@@ -19,6 +19,3 @@ export function split(path) {
return parts
}
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)

View File

@@ -1,20 +1,15 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import assert from 'assert'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes, randomUUID } from 'crypto'
import { randomBytes } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './path'
import { basename, dirname, normalize as normalizePath } from './_path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { _getEncryptor } from './_encryptor'
const { info, warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -25,9 +20,6 @@ const computeRate = (hrtime, size) => {
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -68,7 +60,6 @@ class PrefixWrapper {
}
export default class RemoteHandlerAbstract {
_encryptor
constructor(remote, options = {}) {
if (remote.url === 'test://') {
this._remote = remote
@@ -79,7 +70,6 @@ export default class RemoteHandlerAbstract {
}
}
;({ highWaterMark: this._highWaterMark, timeout: this._timeout = DEFAULT_TIMEOUT } = options)
this._encryptor = _getEncryptor(this._remote.encryptionKey)
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
this.closeFile = sharedLimit(this.closeFile)
@@ -118,51 +108,90 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (options.end !== undefined || options.start !== undefined) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
}
// TODO: remove method
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
let stream = await timeout.call(
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
this._timeout
)
// detect early errors
await fromEvent(stream, 'readable')
if (checksum) {
try {
const path = typeof file === 'string' ? file : file.path
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
} catch (error) {
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
throw error
}
}
if (!checksum) {
return streamP
}
if (this.isEncrypted) {
stream = this._encryptor.decryptStream(stream)
} else {
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
try {
stream.length = await this._getSize(file)
} catch (error) {
// ignore errors
}
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
return stream
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
.then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
promise = Promise.all([
promise,
ignoreErrors.call(
this._getSize(file).then(size => {
stream.length = size
})
),
])
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid an unhandled rejection warning
ignoreErrors.call(streamP)
return this._readFile(checksumFile(path), { flags: 'r' }).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
/**
@@ -178,8 +207,6 @@ export default class RemoteHandlerAbstract {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
input = this._encryptor.encryptStream(input)
if (checksum) {
checksumStream = createChecksumStream()
pipeline(input, checksumStream, noop)
@@ -190,8 +217,6 @@ export default class RemoteHandlerAbstract {
validator,
})
if (checksum) {
// using _outputFile means the checksum will NOT be encrypted
// it is by design to allow checking of encrypted files without the key
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
}
}
@@ -211,13 +236,8 @@ export default class RemoteHandlerAbstract {
return timeout.call(this._getInfo(), this._timeout)
}
// when using encryption, the file size is aligned with the encryption block size (16 bytes)
// that means the size will be 1 to 16 bytes more than the content size plus the initialization vector length (16 bytes)
async getSize(file) {
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
return size - this._encryptor.ivLength
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
}
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
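The size comment above can be made concrete with a small sketch, assuming AES-CBC with PKCS#7 padding and a 16-byte IV stored at the start of the file (an assumption, not spelled out in this hunk):

```js
// Sketch: size on disk of an encrypted file under the assumptions above.
const IV_LENGTH = 16
const BLOCK_SIZE = 16
const sizeOnDisk = contentSize =>
  IV_LENGTH + (Math.floor(contentSize / BLOCK_SIZE) + 1) * BLOCK_SIZE

sizeOnDisk(0) // 32: even empty content takes a full padding block
sizeOnDisk(15) // 32: 15 bytes of content + 1 byte of padding + the IV
sizeOnDisk(16) // 48: a full block still gains one extra padding block
```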
@@ -263,18 +283,15 @@ export default class RemoteHandlerAbstract {
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
await this._outputFile(normalizePath(file), data, { dirMode, flags })
}
async read(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async readFile(file, { flags = 'r' } = {}) {
const data = await this._readFile(normalizePath(file), { flags })
return this._encryptor.decryptData(data)
return this._readFile(normalizePath(file), { flags })
}
async rename(oldPath, newPath, { checksum = false } = {}) {
@@ -314,61 +331,6 @@ export default class RemoteHandlerAbstract {
@synchronized()
async sync() {
await this._sync()
try {
await this._checkMetadata()
} catch (error) {
await this._forget()
throw error
}
}
async _canWriteMetadata() {
const list = await this.list('/', {
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
})
return list.length === 0
}
async _createMetadata() {
await Promise.all([
this._writeFile(
normalizePath(ENCRYPTION_DESC_FILENAME),
JSON.stringify({ algorithm: this._encryptor.algorithm }),
{
flags: 'w',
}
), // not encrypted
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
])
}
async _checkMetadata() {
try {
// this file is not encrypted
const data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
JSON.parse(data)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
try {
// this file is encrypted
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME)
JSON.parse(data)
} catch (error) {
if (error.code === 'ENOENT' || (await this._canWriteMetadata())) {
info('will update metadata of this remote')
return this._createMetadata()
}
warn(
`The encryptionKey setting of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
{ error }
)
// will probably send an ERR_OSSL_EVP_BAD_DECRYPT if the key is incorrect
throw error
}
}
async test() {
@@ -395,12 +357,11 @@ export default class RemoteHandlerAbstract {
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
warn(`error while testing the remote at step ${step}`, { error })
return {
success: false,
step,
file: testFileName,
error,
error: error.message || String(error),
}
} finally {
ignoreErrors.call(this._unlink(testFileName))
@@ -422,13 +383,11 @@ export default class RemoteHandlerAbstract {
}
async write(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async writeFile(file, data, { flags = 'wx' } = {}) {
const encryptedData = this._encryptor.encryptData(data)
await this._writeFile(normalizePath(file), encryptedData, { flags })
await this._writeFile(normalizePath(file), data, { flags })
}
// Methods that can be called by private methods to avoid parallel limit on public methods
@@ -461,10 +420,6 @@ export default class RemoteHandlerAbstract {
// Methods that can be implemented by inheriting classes
useVhdDirectory() {
return this._remote.useVhdDirectory ?? false
}
async _closeFile(fd) {
throw new Error('Not implemented')
}
@@ -547,13 +502,9 @@ export default class RemoteHandlerAbstract {
async _outputStream(path, input, { dirMode, validator }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await timeout.call(
this._createOutputStream(tmpPath, {
dirMode,
flags: 'wx',
}),
this._timeout
)
const output = await this.createOutputStream(tmpPath, {
dirMode,
})
try {
await fromCallback(pipeline, input, output)
if (validator !== undefined) {
@@ -636,10 +587,6 @@ export default class RemoteHandlerAbstract {
async _writeFile(file, data, options) {
throw new Error('Not implemented')
}
get isEncrypted() {
return this._encryptor.id !== 'NULL_ENCRYPTOR'
}
}
function createPrefixWrapperMethods() {

View File

@@ -30,6 +30,18 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -1,7 +1,10 @@
/* eslint-env jest */
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -24,6 +27,9 @@ const unsecureRandomBytes = n => {
const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
yield TEST_DATA
})
const rejectionOf = p =>
p.then(
@@ -76,6 +82,14 @@ handlers.forEach(url => {
})
})
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {

View File

@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
const HANDLERS = {
file: RemoteHandlerLocal,
@@ -14,8 +15,10 @@ const HANDLERS = {
try {
execa.sync('mount.cifs', ['-V'])
HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
HANDLERS.smb = RemoteHandlerSmb
} catch (_) {}
}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]

View File

@@ -1,35 +1,13 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import identity from 'lodash/identity.js'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
const { info, warn } = createLogger('xo:fs:local')
// save current stack trace and add it to any rejected error
//
// This is especially useful when the resolution is separate from the initial
// call, which is often the case with RPC libs.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(promise) {
const stackContainer = new Error()
try {
return await promise
} catch (error) {
error.stack = stackContainer.stack
throw error
}
}
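A small sketch of what the wrapper buys (the call site is hypothetical):

```js
// Sketch: with the wrapper, a later rejection carries the stack of the
// original call site instead of an internal I/O callback frame.
async function getSize(path) {
  const stats = await addSyncStackTrace(fs.stat(path))
  return stats.size
}
// on failure, error.stack now points into getSize() rather than into libuv
```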
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)
this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : identity
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -52,17 +30,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return this._addSyncStackTrace(fs.close(fd))
return fs.close(fd)
}
async _copy(oldPath, newPath) {
return this._addSyncStackTrace(fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath)))
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent(stream, 'open'))
await fromEvent(stream, 'open')
return stream
}
return fs.createReadStream('', {
@@ -75,7 +53,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await this._addSyncStackTrace(fromEvent(stream, 'open'))
await fromEvent(stream, 'open')
return stream
}
return fs.createWriteStream('', {
@@ -101,93 +79,71 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize(file) {
const stats = await this._addSyncStackTrace(fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path)))
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
return stats.size
}
async _list(dir) {
return this._addSyncStackTrace(fs.readdir(this._getFilePath(dir)))
return fs.readdir(this._getFilePath(dir))
}
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})
let release = await acquire()
return async () => {
try {
await release()
} catch (error) {
warn('lock could not be released', { error })
}
}
_lock(path) {
return lockfile.lock(this._getFilePath(path))
}
_mkdir(dir, { mode }) {
return this._addSyncStackTrace(fs.mkdir(this._getFilePath(dir), { mode }))
return fs.mkdir(this._getFilePath(dir), { mode })
}
async _openFile(path, flags) {
return this._addSyncStackTrace(fs.open(this._getFilePath(path), flags))
return fs.open(this._getFilePath(path), flags)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await this._addSyncStackTrace(fs.open(this._getFilePath(file), 'r')) : file.fd
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
try {
return await this._addSyncStackTrace(
fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
)
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
} finally {
if (needsClose) {
await this._addSyncStackTrace(fs.close(file))
await fs.close(file)
}
}
}
async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry(() => fs.readFile(filePath, options), this._retriesOnEagain))
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
}
async _rename(oldPath, newPath) {
return this._addSyncStackTrace(fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath)))
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _rmdir(dir) {
return this._addSyncStackTrace(fs.rmdir(this._getFilePath(dir)))
return fs.rmdir(this._getFilePath(dir))
}
async _sync() {
const path = this._getRealPath('/')
await this._addSyncStackTrace(fs.ensureDir(path))
await this._addSyncStackTrace(fs.access(path, fs.R_OK | fs.W_OK))
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return this._addSyncStackTrace(fs.truncate(this._getFilePath(file), len))
return fs.truncate(this._getFilePath(file), len)
}
async _unlink(file) {
const filePath = this._getFilePath(file)
return await this._addSyncStackTrace(retry(() => fs.unlink(filePath), this._retriesOnEagain))
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
}
_writeFd(file, buffer, position) {
return this._addSyncStackTrace(fs.write(file.fd, buffer, 0, buffer.length, position))
return fs.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return this._addSyncStackTrace(fs.writeFile(this._getFilePath(file), data, { flag: flags }))
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
}
}

View File

@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
import { basename, join, split } from './_path'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -259,21 +259,21 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}
async _list(dir, { delimiter = '/' } = {}) {
async _list(dir) {
let NextContinuationToken
const uniq = new Set()
const Prefix = this._makePrefix(dir)
do {
const command = {
Bucket: this._bucket,
Prefix,
// will only return paths up to the delimiter
ContinuationToken: NextContinuationToken,
}
if (delimiter !== null) {
command.Delimiter = delimiter
}
const result = await this._s3.send(new ListObjectsV2Command(command))
const result = await this._s3.send(
new ListObjectsV2Command({
Bucket: this._bucket,
Prefix,
Delimiter: '/',
// will only return paths up to the delimiter
ContinuationToken: NextContinuationToken,
})
)
if (result.IsTruncated) {
warn(`need pagination to browse the directory ${dir} completely`)
@@ -289,7 +289,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
// files
for (const entry of result.Contents ?? []) {
uniq.add(delimiter === null ? entry.Key.substr(Prefix.length) : basename(entry.Key))
uniq.add(basename(entry.Key))
}
} while (NextContinuationToken !== undefined)
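The effect of the `delimiter` option visible in this hunk can be sketched as follows; the bucket keys are hypothetical:

```js
// Sketch (assumption): given keys 'backups/job1/meta.json' and
// 'backups/job2/meta.json' in the bucket.
async function demo(handler) {
  await handler._list('backups')
  // → ['job1', 'job2'] (Delimiter '/': direct children only)
  await handler._list('backups', { delimiter: null })
  // → ['job1/meta.json', 'job2/meta.json'] (no delimiter: recursive listing)
}
```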
@@ -308,7 +308,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
// s3 doesn't have a rename operation, so copy + delete source
async _rename(oldPath, newPath) {
await this._copy(oldPath, newPath)
await this.copy(oldPath, newPath)
await this._s3.send(new DeleteObjectCommand(this._createParams(oldPath)))
}
@@ -525,8 +525,4 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
useVhdDirectory() {
return true
}
}

View File

@@ -0,0 +1,23 @@
import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import { normalize } from './_path'
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}
get type() {
return 'smb'
}
}

View File

@@ -1,23 +1,163 @@
import { parse } from 'xo-remote-parser'
import Smb2 from '@marsaud/smb2'
import MountHandler from './_mount'
import { normalize } from './path'
import RemoteHandlerAbstract from './abstract'
export default class SmbHandler extends MountHandler {
// Wrap an error to expose a normalized, fs-style error code.
const wrapError = (error, code) => ({
__proto__: error,
cause: error,
code,
})
const normalizeError = (error, shouldBeDirectory) => {
const { code } = error
throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
? wrapError(error, 'ENOTEMPTY')
: code === 'STATUS_FILE_IS_A_DIRECTORY'
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
: code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
: error
}
const normalizeDirError = error => normalizeError(error, true)
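As a sketch of what `wrapError` produces (the error values are hypothetical):

```js
// Sketch: the wrapper exposes a normalized fs-style code while inheriting
// message and stack from the original error via the prototype chain.
const original = Object.assign(new Error('remote failure'), {
  code: 'STATUS_OBJECT_NAME_NOT_FOUND',
})
const wrapped = wrapError(original, 'ENOENT')
wrapped.code // 'ENOENT'
wrapped.message // 'remote failure' (inherited)
wrapped.cause === original // true
wrapped instanceof Error // true
```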
export default class SmbHandler extends RemoteHandlerAbstract {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
super(remote, opts)
// defined in _sync()
this._client = undefined
const prefix = this._remote.path
this._prefix = prefix !== '' ? prefix + '\\' : prefix
}
get type() {
return 'smb'
}
_getFilePath(file) {
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}
_dirname(file) {
const parts = file.split('\\')
parts.pop()
return parts.join('\\')
}
_closeFile(file) {
return this._client.close(file).catch(normalizeError)
}
_createReadStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createReadStream(file, options).catch(normalizeError)
}
_createWriteStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createWriteStream(file, options).catch(normalizeError)
}
_forget() {
const client = this._client
this._client = undefined
return client.disconnect()
}
_getSize(file) {
return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
}
_list(dir) {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}
// TODO: add flags
_openFile(path, flags) {
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}
async _read(file, buffer, position) {
const client = this._client
const needsClose = typeof file === 'string'
file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
try {
return await client.read(file, buffer, 0, buffer.length, position)
} catch (error) {
normalizeError(error)
} finally {
if (needsClose) {
await client.close(file)
}
}
}
_readFile(file, options) {
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}
_rename(oldPath, newPath) {
return this._client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
replace: true,
})
.catch(normalizeError)
}
_rmdir(dir) {
return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_sync() {
const remote = this._remote
this._client = new Smb2({
share: `\\\\${remote.host}`,
domain: remote.domain,
username: remote.username,
password: remote.password,
autoCloseTimeout: 0,
})
// Check access (smb2 does not expose connect in public so far...)
return this.list('.')
}
_truncate(file, len) {
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}

View File

@@ -7,9 +7,6 @@ import { watch } from 'app-conf'
const { warn } = createLogger('xo:mixins:config')
// if path is undefined, an empty string or an empty array, returns the root value
const niceGet = (value, path) => (path === undefined || path.length === 0 ? value : get(value, path))
export default class Config {
constructor(app, { appDir, appName, config }) {
this._config = config
@@ -33,7 +30,7 @@ export default class Config {
}
get(path) {
const value = niceGet(this._config, path)
const value = get(this._config, path)
if (value === undefined) {
throw new TypeError('missing config entry: ' + path)
}
@@ -45,23 +42,17 @@ export default class Config {
}
getOptional(path) {
return niceGet(this._config, path)
return get(this._config, path)
}
watch(path, cb) {
// short syntax for the whole config: watch(cb)
if (typeof path === 'function') {
cb = path
path = undefined
}
// internal arg
const processor = arguments.length > 2 ? arguments[2] : identity
let prev
const watcher = config => {
try {
const value = processor(niceGet(config, path))
const value = processor(get(config, path))
if (!isEqual(value, prev)) {
const previous = prev
prev = value

View File

@@ -40,7 +40,7 @@ export default class HttpProxy {
this.#app = app
const events = new EventListenersManager(httpServer)
app.config.watch('http.proxy.enabled', (enabled = true) => {
app.config.watch('http.proxy.enabled', (enabled = false) => {
events.removeAll()
if (enabled) {
events.add('connect', this.#handleConnect.bind(this)).add('request', this.#handleRequest.bind(this))

View File

@@ -1,214 +0,0 @@
import { createLogger } from '@xen-orchestra/log'
import { createSecureContext } from 'tls'
import { dirname } from 'node:path'
import { X509Certificate } from 'node:crypto'
import acme from 'acme-client'
import fs from 'node:fs/promises'
import get from 'lodash/get.js'
const { debug, info, warn } = createLogger('xo:mixins:sslCertificate')
acme.setLogger(message => {
debug(message)
})
// - create any missing parent directories
// - replace existing files
// - secure permissions (read-only for the owner)
async function outputFile(path, content) {
await fs.mkdir(dirname(path), { recursive: true })
try {
await fs.unlink(path)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
await fs.writeFile(path, content, { flag: 'wx', mode: 0o400 })
}
// from https://github.com/publishlab/node-acme-client/blob/master/examples/auto.js
class SslCertificate {
#cert
#challengeCreateFn
#challengeRemoveFn
#delayBeforeRenewal = 30 * 24 * 60 * 60 * 1000 // 30 days
#secureContext
#updateSslCertificatePromise
constructor({ challengeCreateFn, challengeRemoveFn }, cert, key) {
this.#challengeCreateFn = challengeCreateFn
this.#challengeRemoveFn = challengeRemoveFn
this.#set(cert, key)
}
get #isValid() {
const cert = this.#cert
return cert !== undefined && Date.parse(cert.validTo) > Date.now() && cert.issuer !== cert.subject
}
get #shouldBeRenewed() {
return !(this.#isValid && Date.parse(this.#cert.validTo) > Date.now() + this.#delayBeforeRenewal)
}
#set(cert, key) {
this.#cert = new X509Certificate(cert)
this.#secureContext = createSecureContext({ cert, key })
}
async getSecureContext(config) {
if (!this.#shouldBeRenewed) {
return this.#secureContext
}
if (this.#updateSslCertificatePromise === undefined) {
// not currently updating certificate
//
// ensure we only refresh certificate once at a time
//
// promise is cleaned by #updateSslCertificate itself
this.#updateSslCertificatePromise = this.#updateSslCertificate(config)
}
// old certificate is still here, return it while updating
if (this.#isValid) {
return this.#secureContext
}
return this.#updateSslCertificatePromise
}
async #save(certPath, cert, keyPath, key) {
try {
await Promise.all([outputFile(keyPath, key), outputFile(certPath, cert)])
info('new certificate generated', { cert: certPath, key: keyPath })
} catch (error) {
warn(`couldn't write let's encrypt certificates to disk `, { error })
}
}
async #updateSslCertificate(config) {
const { cert: certPath, key: keyPath, acmeEmail, acmeDomain } = config
try {
let { acmeCa = 'letsencrypt/production' } = config
if (!(acmeCa.startsWith('http:') || acmeCa.startsWith('https:'))) {
acmeCa = get(acme.directory, acmeCa.split('/'))
}
/* Init client */
const client = new acme.Client({
directoryUrl: acmeCa,
accountKey: await acme.crypto.createPrivateKey(),
})
/* Create CSR */
let [key, csr] = await acme.crypto.createCsr({
commonName: acmeDomain,
})
csr = csr.toString()
key = key.toString()
debug('Successfully generated key and csr')
/* Certificate */
const cert = await client.auto({
challengeCreateFn: this.#challengeCreateFn,
challengePriority: ['http-01'],
challengeRemoveFn: this.#challengeRemoveFn,
csr,
email: acmeEmail,
skipChallengeVerification: true,
termsOfServiceAgreed: true,
})
debug('Successfully generated certificate')
this.#set(cert, key)
// don't wait for this
this.#save(certPath, cert, keyPath, key)
return this.#secureContext
} catch (error) {
warn(`couldn't renew ssl certificate`, { acmeDomain, error })
} finally {
this.#updateSslCertificatePromise = undefined
}
}
}
export default class SslCertificates {
#app
#challenges = new Map()
#challengeHandlers = {
challengeCreateFn: (authz, challenge, keyAuthorization) => {
this.#challenges.set(challenge.token, keyAuthorization)
},
challengeRemoveFn: (authz, challenge, keyAuthorization) => {
this.#challenges.delete(challenge.token)
},
}
#handlers = new Map()
constructor(app, { httpServer }) {
// don't set up certificate handling if httpServer is not present
//
// that can happen when the app is instantiated in another context like xo-server-recover-account
if (httpServer === undefined) {
return
}
const prefix = '/.well-known/acme-challenge/'
httpServer.on('request', (req, res) => {
const { url } = req
if (url.startsWith(prefix)) {
const token = url.slice(prefix.length)
this.#acmeChallendMiddleware(req, res, token)
}
})
this.#app = app
httpServer.getSecureContext = this.getSecureContext.bind(this)
}
async getSecureContext(httpsDomainName, configKey, initialCert, initialKey) {
const config = this.#app.config.get(['http', 'listen', configKey])
const handlers = this.#handlers
const { acmeDomain } = config
// not a Let's Encrypt protected endpoint, something changed in the configuration
if (acmeDomain === undefined) {
handlers.delete(configKey)
return
}
// server has been accessed with another domain, don't use the certificate
if (acmeDomain !== httpsDomainName) {
return
}
let handler = handlers.get(configKey)
if (handler === undefined) {
// register the handler for this domain
handler = new SslCertificate(this.#challengeHandlers, initialCert, initialKey)
handlers.set(configKey, handler)
}
return handler.getSecureContext(config)
}
// middleware that will serve the http challenge to let's encrypt servers
#acmeChallendMiddleware(req, res, token) {
debug('fetching challenge for token ', token)
const challenge = this.#challenges.get(token)
debug('challenge content is ', challenge)
if (challenge === undefined) {
res.statusCode = 404
res.end()
return
}
res.write(challenge)
res.end()
debug('successfully answered challenge ')
}
}

View File

@@ -10,11 +10,11 @@
## Set up
The proxy is enabled by default, to disable it, add the following lines to your config:
The proxy is disabled by default, to enable it, add the following lines to your config:
```toml
[http.proxy]
enabled = false
enabled = true
```
## Usage

View File

@@ -1,49 +0,0 @@
> This module provides [Let's Encrypt](https://letsencrypt.org/) integration to `xo-proxy` and `xo-server`.
First of all, make sure your server is listening on HTTP on port 80 and on HTTPS on port 443.
In `xo-server`, to avoid HTTP access, enable the redirection to HTTPS:
```toml
[http]
redirectToHttps = true
```
Your server must be reachable at the configured domain by the certificate provider (e.g. Let's Encrypt), which usually means publicly reachable.
Finally, add the following entries to your HTTPS configuration.
```toml
# Must be set to true for this feature
autoCert = true
# These entries are required and indicate where the certificate and the
# private key will be saved.
cert = 'path/to/cert.pem'
key = 'path/to/key.pem'
# ACME (e.g. Let's Encrypt, ZeroSSL) CA directory
#
# Specifies the URL to the ACME CA's directory.
#
# An identifier `provider/directory` can be passed instead of a URL, see the
# list of supported directories here: https://www.npmjs.com/package/acme-client#directory-urls
#
# Note that the application cannot detect that this value has changed.
#
# In that case delete the certificate and the key files, and restart the
# application to generate new ones.
#
# Default is 'letsencrypt/production'
acmeCa = 'zerossl/production'
# Domain for which the certificate should be created.
#
# This entry is required.
acmeDomain = 'my.domain.net'
# Optional email address which will be used for the certificate creation.
#
# It will be notified of any issues.
acmeEmail = 'admin@my.domain.net'
```

View File

@@ -14,16 +14,15 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.7.1",
"version": "0.4.0",
"engines": {
"node": ">=15.6"
"node": ">=12"
},
"dependencies": {
"@vates/event-listeners-manager": "^1.0.1",
"@vates/event-listeners-manager": "^1.0.0",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/emit-async": "^0.1.0",
"@xen-orchestra/log": "^0.3.0",
"acme-client": "^5.0.0",
"app-conf": "^2.1.0",
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0"

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.2",
"version": "0.1.1",
"engines": {
"node": ">=8.10"
},
@@ -30,7 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/read-chunk": "^1.0.0"
"@vates/read-chunk": "^0.1.2"
},
"author": {
"name": "Vates SAS",

View File

@@ -33,19 +33,26 @@ async function main(argv) {
ignoreUnknownFormats: true,
})
const opts = getopts(argv, {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
const {
_: args,
file,
help,
host,
raw,
token,
} = getopts(argv, {
alias: { file: 'f', help: 'h' },
boolean: ['help', 'raw'],
default: {
token: config.authenticationToken,
},
stopEarly: true,
string: ['file', 'host', 'token', 'url'],
string: ['file', 'host', 'token'],
})
const { _: args, file } = opts
if (opts.help || (file === '' && args.length === 0)) {
if (help || (file === '' && args.length === 0)) {
return console.log(
'%s',
`Usage:
@@ -70,29 +77,18 @@ ${pkg.name} v${pkg.version}`
const baseRequest = {
headers: {
'content-type': 'application/json',
cookie: `authenticationToken=${token}`,
},
pathname: '/api/v1',
protocol: 'https:',
rejectUnauthorized: false,
}
let { token } = opts
if (opts.url !== '') {
const { protocol, host, username } = new URL(opts.url)
Object.assign(baseRequest, { protocol, host })
if (username !== '') {
token = username
}
if (host !== '') {
baseRequest.host = host
} else {
baseRequest.protocol = 'https:'
if (opts.host !== '') {
baseRequest.host = opts.host
} else {
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
baseRequest.hostname = hostname
baseRequest.port = port
}
baseRequest.hostname = hostname
baseRequest.port = port
}
baseRequest.headers.cookie = `authenticationToken=${token}`
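A sketch of the `--url` form handled above (the token and host are hypothetical):

```js
// Sketch (assumption): a token can be embedded as the username of the URL.
const { protocol, host, username } = new URL('https://mytoken@proxy.example.net')
protocol // 'https:'
host // 'proxy.example.net'
username // 'mytoken' (used as the authentication token)
```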
const call = async ({ method, params }) => {
if (callPath.length !== 0) {
process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
@@ -131,7 +127,7 @@ ${pkg.name} v${pkg.version}`
stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
stdout.write('\n')
}
} else if (opts.raw && typeof result === 'string') {
} else if (raw && typeof result === 'string') {
stdout.write(result)
} else {
stdout.write(inspect(result, { colors: true, depth: null }))

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/proxy-cli",
"version": "0.3.1",
"version": "0.2.0",
"license": "AGPL-3.0-or-later",
"description": "CLI for @xen-orchestra/proxy",
"keywords": [
@@ -26,7 +26,7 @@
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^1.0.0",
"@vates/read-chunk": "^0.1.2",
"ansi-colors": "^4.1.1",
"app-conf": "^2.1.0",
"content-type": "^1.0.4",

View File

@@ -1,7 +1,6 @@
import Config from '@xen-orchestra/mixins/Config.mjs'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import SslCertificate from '@xen-orchestra/mixins/SslCertificate.mjs'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
@@ -15,23 +14,9 @@ import ReverseProxy from './mixins/reverseProxy.mjs'
export default class App {
constructor(opts) {
mixin(
this,
{
Api,
Appliance,
Authentication,
Backups,
Config,
Hooks,
HttpProxy,
Logs,
Remotes,
ReverseProxy,
SslCertificate,
},
[opts]
)
mixin(this, { Api, Appliance, Authentication, Backups, Config, Hooks, HttpProxy, Logs, Remotes, ReverseProxy }, [
opts,
])
const debounceResource = createDebounceResource()
this.config.watchDuration('resourceCacheDelay', delay => {

View File

@@ -1,4 +1,4 @@
import { format, parse, MethodNotFound, JsonRpcError } from 'json-rpc-protocol'
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
import * as errors from 'xo-common/api-errors.js'
import Ajv from 'ajv'
import asyncIteratorToStream from 'async-iterator-to-stream'
@@ -9,7 +9,6 @@ import helmet from 'koa-helmet'
import Koa from 'koa'
import once from 'lodash/once.js'
import Router from '@koa/router'
import stubTrue from 'lodash/stubTrue.js'
import Zone from 'node-zone'
import { createLogger } from '@xen-orchestra/log'
@@ -78,19 +77,7 @@ export default class Api {
const { method, params } = body
warn('call error', { method, params, error })
ctx.set('Content-Type', 'application/json')
let e = error
if (error != null && typeof error.toJsonRpcError !== 'function') {
const { message, ...data } = error
// force these entries even if they are not enumerable
data.code = error.code
data.stack = error.stack
e = new JsonRpcError(error.message, undefined, data)
}
ctx.body = format.error(body.id, e)
ctx.body = format.error(body.id, error)
return
}
@@ -179,20 +166,14 @@ export default class Api {
throw errors.noSuchObject('method', name)
}
const { description, params = {}, result = {} } = method
return { description, name, params, result }
const { description, params = {} } = method
return { description, name, params }
},
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' },
},
result: {
description: { type: 'string' },
name: { type: 'string' },
params: { type: 'object' },
result: { type: 'object' },
},
},
],
},
@@ -224,29 +205,40 @@ export default class Api {
})
}
addMethod(name, method, { description, params = {}, result: resultSchema } = {}) {
addMethod(name, method, { description, params = {} } = {}) {
const methods = this._methods
if (name in methods) {
throw new Error(`API method ${name} already exists`)
}
const validateParams = this.#compileSchema(params)
const validateResult = this.#compileSchema(resultSchema)
const ajv = this._ajv
const validate = ajv.compile({
// we want additional properties to be disabled by default
additionalProperties: params['*'] || false,
const m = async params => {
if (!validateParams(params)) {
throw errors.invalidParameters(validateParams.errors)
properties: params,
// we want params to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
required: Object.keys(params).filter(name => {
const param = params[name]
const required = !param.optional
delete param.optional
return required
}),
type: 'object',
})
const m = params => {
if (!validate(params)) {
throw errors.invalidParameters(validate.errors)
}
const result = await method(params)
if (!validateResult(result)) {
warn('invalid API method result', { errors: validateResult.error, result })
}
return result
return method(params)
}
m.description = description
m.params = params
m.result = resultSchema
methods[name] = m
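A sketch of the declaration convention described in the comments above; the method name and schema are hypothetical:

```js
// Sketch (assumption): properties are required by default, `optional: true`
// opts a parameter out, and extra properties are rejected.
api.addMethod('vm.search', async ({ name, limit }) => [], {
  description: 'search VMs by name',
  params: {
    name: { type: 'string' },
    limit: { type: 'number', optional: true },
  },
})
```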
@@ -297,43 +289,4 @@ export default class Api {
}
return fn(params)
}
#compileSchema(schema) {
if (schema === undefined) {
return stubTrue
}
if (schema.type === undefined) {
schema = { type: 'object', properties: schema }
}
const { type } = schema
if (Array.isArray(type) ? type.includes('object') : type === 'object') {
const { properties = {} } = schema
if (schema.additionalProperties === undefined) {
const wildCard = properties['*']
if (wildCard === undefined) {
// we want additional properties to be disabled by default
schema.additionalProperties = false
} else {
delete properties['*']
schema.additionalProperties = wildCard
}
}
// we want properties to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
if (schema.required === undefined) {
schema.required = Object.keys(properties).filter(name => {
const param = properties[name]
const required = !param.optional
delete param.optional
return required
})
}
}
return this._ajv.compile(schema)
}
}

View File

@@ -56,32 +56,11 @@ ${APP_NAME} v${APP_VERSION}
createSecureServer: opts => createSecureServer({ ...opts, allowHTTP1: true }),
})
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }, configKey) => {
const useAcme = autoCert && opts.acmeDomain !== undefined
// don't pass these entries to httpServer.listen(opts)
for (const key of Object.keys(opts).filter(_ => _.startsWith('acme'))) {
delete opts[key]
}
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }) => {
try {
let niceAddress
if (cert !== undefined && key !== undefined) {
if (useAcme) {
opts.SNICallback = async (serverName, callback) => {
try {
// injected by mixins/SslCertificate
const secureContext = await httpServer.getSecureContext(serverName, configKey, opts.cert, opts.key)
callback(null, secureContext)
} catch (error) {
warn(error)
callback(error, null)
}
}
}
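For context, a minimal sketch of the `SNICallback` contract relied on here; `loadContext` is a hypothetical stand-in for the injected `httpServer.getSecureContext`:

```js
// Sketch: Node.js invokes SNICallback during the TLS handshake with the
// server name requested by the client.
const { createServer } = require('https')

const server = createServer({
  SNICallback: (serverName, callback) => {
    loadContext(serverName).then(
      secureContext => callback(null, secureContext),
      error => callback(error, null)
    )
  },
})
```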
niceAddress = await pRetry(
async () => {
const niceAddress = await pRetry(
async () => {
if (cert !== undefined && key !== undefined) {
try {
opts.cert = fse.readFileSync(cert)
opts.key = fse.readFileSync(key)
@@ -97,22 +76,20 @@ ${APP_NAME} v${APP_VERSION}
opts.cert = pems.cert
opts.key = pems.key
}
return httpServer.listen(opts)
},
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
},
}
)
} else {
niceAddress = await httpServer.listen(opts)
}
return httpServer.listen(opts)
},
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
},
}
)
info(`Web server listening on ${niceAddress}`)
} catch (error) {
@@ -169,7 +146,6 @@ ${APP_NAME} v${APP_VERSION}
process.on(signal, () => {
if (alreadyCalled) {
warn('forced exit')
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
alreadyCalled = true
@@ -188,7 +164,6 @@ main(process.argv.slice(2)).then(
error => {
fatal(error)
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
)

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.26.0",
"version": "0.22.1",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -26,19 +26,19 @@
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@koa/router": "^12.0.0",
"@koa/router": "^10.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.0.0",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.7.1",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/xapi": "^1.4.2",
"@xen-orchestra/mixins": "^0.4.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^1.0.0",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"async-iterator-to-stream": "^1.1.0",
@@ -46,7 +46,7 @@
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.1",
"http-server-plus": "^0.11.0",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
@@ -60,7 +60,7 @@
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.2",
"xen-api": "^1.2.0",
"xo-common": "^0.8.0"
},
"devDependencies": {

View File

@@ -2,23 +2,22 @@
const { execFile } = require('child_process')
const RE =
/^(-----BEGIN PRIVATE KEY-----.+-----END PRIVATE KEY-----\n)(-----BEGIN CERTIFICATE-----.+-----END CERTIFICATE-----\n)$/s
exports.genSelfSignedCert = async ({ days = 360 } = {}) =>
const openssl = (cmd, args, { input, ...opts } = {}) =>
new Promise((resolve, reject) => {
execFile(
'openssl',
['req', '-batch', '-new', '-x509', '-days', String(days), '-nodes', '-newkey', 'rsa:2048', '-keyout', '-'],
(error, stdout) => {
if (error != null) {
return reject(error)
}
const matches = RE.exec(stdout)
if (matches === null) {
return reject(new Error('stdout does not match regular expression'))
}
const [, key, cert] = matches
resolve({ cert, key })
}
const child = execFile('openssl', [cmd, ...args], opts, (error, stdout) =>
error != null ? reject(error) : resolve(stdout)
)
if (input !== undefined) {
child.stdin.end(input)
}
})
exports.genSelfSignedCert = async ({ days = 360 } = {}) => {
const key = await openssl('genrsa', ['2048'])
return {
cert: await openssl('req', ['-batch', '-new', '-key', '-', '-x509', '-days', String(days), '-nodes'], {
input: key,
}),
key,
}
}
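Hypothetical usage of the refactored helper (the HTTPS wiring is an assumption, not part of this commit):

```js
// Sketch: both values are PEM strings, directly usable for a TLS server.
const { genSelfSignedCert } = require('@xen-orchestra/self-signed')
const { createServer } = require('https')

genSelfSignedCert({ days: 30 }).then(({ cert, key }) => {
  createServer({ cert, key }).listen(8443)
})
```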

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.3",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},

View File

@@ -0,0 +1,3 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

View File

@@ -14,13 +14,31 @@
"name": "Vates SAS",
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"lodash": "^4.17.15"
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish --access public"
},
"dependencies": {
"lodash": "^4.17.15"
}
}

View File

@@ -1,10 +1,8 @@
'use strict'
const escapeRegExp = require('lodash/escapeRegExp')
import escapeRegExp from 'lodash/escapeRegExp'
const compareLengthDesc = (a, b) => b.length - a.length
exports.compileTemplate = function compileTemplate(pattern, rules) {
export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>
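A hedged sketch of typical usage; the rule names are illustrative, and the assumption is that function rules receive the parameters passed to the compiled template:

```js
// Sketch (assumption): rules may be plain values or functions of the params.
const makeName = compileTemplate('{name}-{suffix}', {
  '{name}': vm => vm.name_label,
  '{suffix}': 'copy',
})
makeName({ name_label: 'web01' }) // → 'web01-copy'
```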

View File

@@ -1,8 +1,5 @@
/* eslint-env jest */
'use strict'
const { compileTemplate } = require('.')
import { compileTemplate } from '.'
it("correctly replaces the template's variables", () => {
const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {

View File

@@ -0,0 +1,5 @@
{
"extends": [
"plugin:cypress/recommended"
]
}

4
@xen-orchestra/test-e2e/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
cypress/downloads
cypress/screenshots
cypress/videos
cypress.json

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,28 @@
{
"baseUrl": "http://ci-test-xen-orchestra.localdomain/",
"env": {
"xoLab": {
"url": "",
"username": "",
"password": ""
},
"xenOrchestra": {
"host": "ci-test-xen-orchestra.localdomain",
"username": "",
"password": "",
"snapshotId": ""
},
"xcpNgLts": {
"host": "ci-test-xcp-ng-lts.localdomain",
"username": "",
"password": "",
"snapshotId": ""
},
"xenServerLts": {
"host": "ci-test-xenserver-lts.localdomain",
"username": "",
"password": "",
"snapshotId": ""
}
}
}

View File

@@ -0,0 +1,26 @@
'use strict'
describe('Sign In', () => {
it('should not be able to sign in with bad credentials', () => {
cy.visit('/')
cy.get('[name="username"]').type('bad-user')
cy.get('[name="password"]').type('bad-password')
cy.get('.btn-info').click()
cy.get('.text-danger')
cy.url().should('not.include', '/#/home')
})
it('should be able to sign in', () => {
cy.visit('/')
cy.get('[name="username"]').type(Cypress.env('xenOrchestra').username)
cy.get('[name="password"]').type(Cypress.env('xenOrchestra').password)
cy.get('.btn-info').click()
cy.url().should('include', '/#/home')
})
it('should sign in without UI', () => {
cy.login()
cy.visit('/')
cy.url().should('include', '/#/home')
})
})

View File

@@ -0,0 +1,20 @@
'use strict'
describe('Remote', function () {
beforeEach('login', () => {
cy.login()
})
it('should add a remote', function () {
cy.addServers()
cy.visit('/#/settings/remotes')
cy.contains('New file system remote').should('exist')
cy.get('select[name="type"]').select('Local')
cy.get('input[name="name"]').type('Test local file remote 2')
cy.get('input[name="path"]').type('var/tmp/test-remote')
cy.contains('Save configuration').click()
cy.contains('Local remote selected')
cy.get('button:contains("OK")').click()
cy.get('td:contains("Test local file remote 2")').closest('tr').find('button:contains("Enabled")')
})
})

View File

@@ -0,0 +1,38 @@
'use strict'
describe('Server', () => {
beforeEach('login', () => {
cy.login()
})
it('should add a server', () => {
cy.visit('/#/settings/servers')
cy.get('#form-add-server .form-group:nth-child(1) input').type('XCP-ng LTS')
cy.get('#form-add-server .form-group:nth-child(2) input').type(Cypress.env('xcpNgLts').host)
cy.get('#form-add-server .form-group:nth-child(3) input').type(Cypress.env('xcpNgLts').username)
cy.get('#form-add-server .form-group:nth-child(4) input').type(Cypress.env('xcpNgLts').password)
cy.get('#form-add-server .form-group:nth-child(5) .xo-icon-toggle-off').click()
cy.get('span:contains("Connect")').click()
cy.get(`td:contains("XCP-ng LTS")`)
})
it('should remove a server', () => {
cy.get(`td:contains("XCP-ng LTS")`).closest('tr').find('.btn-danger').click()
cy.get(`td:contains("XCP-ng LTS")`).should('not.exist')
})
it('should disable a server', () => {
cy.addServers()
cy.visit('/#/settings/servers')
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Enabled")').click()
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Disabled")')
})
it('should enable a server', () => {
cy.visit('/#/settings/servers')
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Disabled")').click()
cy.contains('ci-test-xcp-ng-lts.localdomain').closest('tr').find('button:contains("Enabled")')
})
})

View File

@@ -0,0 +1,17 @@
'use strict'
describe('VM', function () {
beforeEach('login', () => {
cy.login()
})
it('should add a VM', function () {
cy.addServers()
cy.visit('/#/home?t=VM')
cy.get('a:contains("New VM")').click()
cy.contains('Create a new VM on').should('exist')
cy.contains('Select pool').closest('.Select-control').click()
cy.contains('ci-test-xcp-ng-lts').should('exist').click()
cy.get('h4:contains("Performance")').should('exist')
})
})

View File

@@ -0,0 +1,6 @@
/// <reference types="cypress" />
'use strict'
module.exports = (on, config) => {
// configure plugins here
}

View File

@@ -0,0 +1,39 @@
'use strict'
// https://on.cypress.io/custom-commands
const { default: Xo } = require('xo-lib')
const { username: xoUsername, password: xoPassword } = Cypress.env('xenOrchestra')
Cypress.Commands.add('login', (username = xoUsername, password = xoPassword) => {
cy.request({
method: 'POST',
url: '/signin/local',
form: true,
body: {
username,
password,
},
})
cy.setCookie('previousDisclaimer', Date.now().toString())
})
Cypress.Commands.add('addServers', async () => {
const xo = new Xo({ url: Cypress.config('baseUrl') })
await xo.open()
await xo.signIn({
email: xoUsername,
password: xoPassword,
})
const { host, username, password } = Cypress.env('xcpNgLts')
await xo.call('server.add', {
host,
username,
password,
label: 'XCP-ng LTS',
allowUnauthorized: true,
})
})

View File

@@ -0,0 +1,9 @@
/// <reference types="cypress" />
declare namespace Cypress {
interface Chainable<Subject> {
login(username: string, password: string): Chainable<any>
logout(): Chainable<any>
addServers(): Chainable<any>
}
}

View File

@@ -0,0 +1,10 @@
'use strict'
import './commands'
before('Restore VMs from snapshot', () => {
cy.exec(
`node scripts/restore-vm.js && wait-on ${Cypress.config('baseUrl')} && wait-on tcp:${Cypress.env('xcpNgLts').host}:80`,
{ timeout: 300e3 },
)
})

View File

@@ -0,0 +1,32 @@
{
"private": true,
"name": "@xen-orchestra/test-e2e",
"version": "0.0.0",
"license": "AGPL-3.0-or-later",
"description": "E2E Tests for Xen Orchestra",
"repository": {
"directory": "@xen-orchestra/test-e2e",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=14"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"bin": {
"restore-vm": "./scripts/restore-vm.js"
},
"devDependencies": {
"cypress": "^9.7.0",
"eslint-plugin-chai-friendly": "^0.7.2",
"eslint-plugin-cypress": "^2.12.1",
"wait-on": "^6.0.1",
"xo-lib": "^0.11.1"
},
"scripts": {
"test": "cypress run"
}
}

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env node
'use strict'
/* eslint-disable no-console */
const Xo = require('xo-lib').default
const path = require('path')
const cypressJson = require(path.resolve(__dirname, '..', 'cypress.json'))
async function main() {
const xo = new Xo({ url: cypressJson.env.xoLab.url })
await xo.open()
await xo.signIn({
email: cypressJson.env.xoLab.username,
password: cypressJson.env.xoLab.password,
})
console.log('Reverting Test VMs from snapshots')
try {
await Promise.all([
xo.call('vm.revert', { snapshot: cypressJson.env.xenServerLts.snapshotId }),
xo.call('vm.revert', { snapshot: cypressJson.env.xcpNgLts.snapshotId }),
xo.call('vm.revert', { snapshot: cypressJson.env.xenOrchestra.snapshotId }),
])
} catch (error) {
console.error('Error happened while reverting VMs')
throw error
}
xo.close()
console.log('VMs reverted successfully.')
}
main()
/* eslint-enable no-console */

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/upload-ova",
"version": "0.1.5",
"version": "0.1.4",
"license": "AGPL-3.0-or-later",
"description": "Basic CLI to upload ova files to Xen-Orchestra",
"keywords": [
@@ -43,7 +43,7 @@
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.4.3"
"xo-vmdk-to-vhd": "^2.3.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -1,9 +0,0 @@
'use strict'
// TODO: remove when Node >=15.0
module.exports = class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}

View File

@@ -230,9 +230,8 @@ function mixin(mixins) {
defineProperties(xapiProto, descriptors)
}
mixin({
host: require('./host.js'),
SR: require('./sr.js'),
task: require('./task.js'),
host: require('./host.js'),
VBD: require('./vbd.js'),
VDI: require('./vdi.js'),
VIF: require('./vif.js'),

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "1.4.2",
"version": "1.0.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -15,7 +15,7 @@
"node": ">=14"
},
"peerDependencies": {
"xen-api": "^1.2.2"
"xen-api": "^1.2.0"
},
"scripts": {
"postversion": "npm publish --access public"
@@ -26,10 +26,8 @@
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"json-rpc-protocol": "^0.13.2",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.0.0",
"xo-common": "^0.8.0"
},
"private": false,

View File

@@ -1,179 +0,0 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_VHD } = require('./index.js')
const assert = require('node:assert').strict
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')
const AggregateError = require('./_AggregateError.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')
const OC_MAINTENANCE = 'xo:maintenanceState'
class Sr {
async create({
content_type = 'user', // recommended by Citrix
device_config,
host,
name_description = '',
name_label,
physical_size = 0,
shared,
sm_config = {},
type,
}) {
const ref = await this.call(
'SR.create',
host,
device_config,
physical_size,
name_label,
name_description,
type,
content_type,
shared,
sm_config
)
// https://developer-docs.citrix.com/projects/citrix-hypervisor-sdk/en/latest/xc-api-extensions/#sr
this.setFieldEntry('SR', ref, 'other_config', 'auto-scan', 'true').catch(warn)
return ref
}
// Switch the SR to maintenance mode:
// - shut down all running VMs with a VDI on this SR
// - their UUIDs are saved into SR.other_config[OC_MAINTENANCE].shutdownVms
// - clean shutdown is attempted, and falls back to a hard shutdown
// - unplug all connected hosts from this SR
async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
const state = { timestamp: Date.now() }
// will throw if already in maintenance mode
await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))
await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)
const runningVms = new Map()
const handleVbd = async ref => {
const vmRef = await this.getField('VBD', ref, 'VM')
if (!runningVms.has(vmRef)) {
const power_state = await this.getField('VM', vmRef, 'power_state')
const isPaused = power_state === 'Paused'
if (isPaused || power_state === 'Running') {
runningVms.set(vmRef, isPaused)
}
}
}
await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
})
{
const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))
const set = new Set(vmsToShutdown)
for (const vmUuid of runningVmUuids) {
if (!set.has(vmUuid)) {
throw incorrectState({
actual: vmsToShutdown,
expected: runningVmUuids,
property: 'vmsToShutdown',
})
}
}
}
state.shutdownVms = {}
await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused
try {
await this.callAsync('VM.clean_shutdown', ref)
} catch (error) {
warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
await this.callAsync('VM.hard_shutdown', ref)
}
$defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
})
state.unpluggedPbds = []
await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
if (await this.getField('PBD', ref, 'currently_attached')) {
state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))
await this.callAsync('PBD.unplug', ref)
$defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
}
})
await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
}
// this method is best effort and will not stop on first error
async disableMaintenanceMode(ref) {
const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])
// will throw if not in maintenance mode
await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)
const errors = []
await asyncMap(state.unpluggedPbds, async uuid => {
try {
await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
} catch (error) {
errors.push(error)
}
})
await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
try {
await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
} catch (error) {
errors.push(error)
}
})
if (errors.length !== 0) {
throw new AggregateError(errors)
}
}
async importVdi(
$defer,
ref,
stream,
{
format = VDI_FORMAT_VHD,
name_label = '[XO] Imported disk - ' + new Date().toISOString(),
virtual_size,
...vdiCreateOpts
} = {}
) {
if (virtual_size === undefined) {
if (format === VDI_FORMAT_VHD) {
const footer = await peekFooterFromStream(stream)
virtual_size = footer.currentSize
} else {
virtual_size = stream.length
assert.notEqual(virtual_size, undefined)
}
}
const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size })
$defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
await this.VDI_importContent(vdiRef, stream, { format })
return vdiRef
}
}
module.exports = Sr
decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })
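A hedged usage sketch of the maintenance-mode pair; the method names assume the `SR_` prefix produced by the mixin shown earlier, and `xapi`, `srUuid` and `vmUuid` are placeholders:

```js
// hypothetical usage against a connected xapi client, inside an async context
const srRef = await xapi.call('SR.get_by_uuid', srUuid)

// shuts down the listed VMs, unplugs the PBDs and records the state
// in SR.other_config['xo:maintenanceState']
await xapi.SR_enableMaintenanceMode(srRef, { vmsToShutdown: [vmUuid] })

// best effort rollback: replugs PBDs, restarts VMs, and throws an
// AggregateError if some of those steps failed
await xapi.SR_disableMaintenanceMode(srRef)
```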

View File

@@ -6,8 +6,6 @@ const { Ref } = require('xen-api')
const isVmRunning = require('./_isVmRunning.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:vbd')
const noop = Function.prototype
module.exports = class Vbd {
@@ -68,10 +66,8 @@ module.exports = class Vbd {
})
if (isVmRunning(powerState)) {
this.callAsync('VBD.plug', vbdRef).catch(warn)
await this.callAsync('VBD.plug', vbdRef)
}
return vbdRef
}
async unplug(ref) {

View File

@@ -1,6 +1,5 @@
'use strict'
const assert = require('node:assert').strict
const CancelToken = require('promise-toolbox/CancelToken')
const pCatch = require('promise-toolbox/catch')
const pRetry = require('promise-toolbox/retry')
@@ -31,7 +30,8 @@ class Vdi {
other_config = {},
read_only = false,
sharable = false,
SR = this.pool.default_SR,
sm_config,
SR,
tags,
type = 'user',
virtual_size,
@@ -39,10 +39,10 @@ class Vdi {
},
{
// blindly copying `sm_config` from another VDI can create problems,
// therefore it should be passed explicitly
// therefore it is ignored by default by this method
//
// see https://github.com/vatesfr/xen-orchestra/issues/4482
sm_config,
setSmConfig = false,
} = {}
) {
return this.call('VDI.create', {
@@ -51,7 +51,7 @@ class Vdi {
other_config,
read_only,
sharable,
sm_config,
sm_config: setSmConfig ? sm_config : undefined,
SR,
tags,
type,
@@ -87,8 +87,6 @@ class Vdi {
}
async importContent(ref, stream, { cancelToken = CancelToken.none, format }) {
assert.notEqual(format, undefined)
if (stream.length === undefined) {
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
}
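Because of this check, a caller importing a raw (non-VHD) stream must attach a `length` to it first; a sketch using Node's standard APIs (the path is illustrative):

```js
// sketch: give a raw file stream the length field importContent expects
const { createReadStream } = require('node:fs')
const { stat } = require('node:fs/promises')

async function rawStreamWithLength(path) {
  const stream = createReadStream(path)
  stream.length = (await stat(path)).size // read by importContent for raw imports
  return stream
}
```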

View File

@@ -11,8 +11,7 @@ const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { JsonRpcError } = require('json-rpc-protocol')
const { incorrectState } = require('xo-common/api-errors.js')
const { Ref } = require('xen-api')
const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -344,13 +343,7 @@ class Vm {
const vm = await this.getRecord('VM', vmRef)
if (!bypassBlockedOperation && 'destroy' in vm.blocked_operations) {
throw forbiddenOperation(
`destroy is blocked: ${
vm.blocked_operations.destroy === 'true'
? 'protected from accidental deletion'
: vm.blocked_operations.destroy
}`
)
throw new Error('destroy is blocked')
}
if (!forceDeleteDefaultTemplate && isDefaultTemplate(vm)) {
@@ -510,22 +503,6 @@ class Vm {
}
return ref
} catch (error) {
if (
// xxhash is the new form consistency hashing in CH 8.1 which uses a faster,
// more efficient hashing algorithm to generate the consistency checks
// in order to support larger files without the consistency checking process taking an incredibly long time
error.code === 'IMPORT_ERROR' &&
error.params?.some(
param =>
param.includes('INTERNAL_ERROR') &&
param.includes('Expected to find an inline checksum') &&
param.includes('.xxhash')
)
) {
warn('import', { error })
throw new JsonRpcError('Importing this VM requires XCP-ng or Citrix Hypervisor >=8.1')
}
// augment the error with as much relevant info as possible
const [poolMaster, sr] = await Promise.all([
safeGetRecord(this, 'host', this.pool.master),
@@ -537,31 +514,12 @@ class Vm {
}
}
async snapshot(
$defer,
vmRef,
{ cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label, unplugVusbs = false } = {}
) {
async snapshot($defer, vmRef, { cancelToken = CancelToken.none, ignoreNobakVdis = false, name_label } = {}) {
const vm = await this.getRecord('VM', vmRef)
const isHalted = vm.power_state === 'Halted'
// requires the VM to be halted because it's not possible to re-plug VUSB on a live VM
if (unplugVusbs && isHalted) {
// vm.VUSBs can be undefined (e.g. on XS 7.0.0)
const vusbs = vm.VUSBs
if (vusbs !== undefined) {
await asyncMap(vusbs, async ref => {
const vusb = await this.getRecord('VUSB', ref)
await vusb.$call('destroy')
$defer.call(this, 'call', 'VUSB.create', vusb.VM, vusb.USB_group, vusb.other_config)
})
}
}
let destroyNobakVdis = false
if (ignoreNobakVdis) {
if (isHalted) {
if (vm.power_state === 'Halted') {
await asyncMap(await listNobakVbds(this, vm.VBDs), async vbd => {
await this.VBD_destroy(vbd.$ref)
$defer.call(this, 'VBD_create', vbd)
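As a usage sketch of the options visible in this hunk (with `xapi` and `vmRef` assumed, and the `VM_` prefix coming from the mixin convention):

```js
// hypothetical call: snapshot a halted VM, skipping [NOBAK] VDIs and
// temporarily destroying/re-creating its VUSBs (left side of this hunk only)
const snapshotRef = await xapi.VM_snapshot(vmRef, {
  name_label: 'before upgrade',
  ignoreNobakVdis: true,
  unplugVusbs: true, // requires the VM to be halted
})
```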

View File

@@ -1,233 +1,5 @@
# ChangeLog
## **next**
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Home/Storage] Show which SRs are used for HA state files [#6339](https://github.com/vatesfr/xen-orchestra/issues/6339) (PR [#6384](https://github.com/vatesfr/xen-orchestra/pull/6384))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Backup/Restore] Fix backup list not loading on page load (PR [#6364](https://github.com/vatesfr/xen-orchestra/pull/6364))
- [Host] Fix `should not contains property ["ignoreBackup"]` on some host operations (PR [#6362](https://github.com/vatesfr/xen-orchestra/pull/6362))
### Packages to release
- @xen-orchestra/fs 3.0.0
- vhd-lib 4.0.0
- @xen-orchestra/backups 0.27.4
- @xen-orchestra/backups-cli 0.7.7
- @xen-orchestra/xapi 1.4.2
- xen-api 1.2.2
- @xen-orchestra/proxy 0.26.0
- vhd-cli 0.9.1
- xo-vmdk-to-vhd 2.4.3
- xo-server 5.101.0
- xo-web 5.102.0
## **5.73.1** (2022-08-04)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Bug fixes
- [Backup] Fix `incorrect backup size in metadata` on each merged VHD (PR [#6331](https://github.com/vatesfr/xen-orchestra/pull/6331))
- [Backup] Fix `assertionError [ERR_ASSERTION]: Expected values to be strictly equal` when resuming a merge (PR [#6349](https://github.com/vatesfr/xen-orchestra/pull/6349))
### Released packages
- @xen-orchestra/backups 0.27.3
- @xen-orchestra/fs 2.1.0
- @xen-orchestra/mixins 0.7.1
- @xen-orchestra/proxy 0.25.1
- vhd-cli 0.9.0
- vhd-lib 3.3.5
- xo-server 5.100.1
- xo-server-auth-saml 0.10.0
- xo-web 5.101.1
## **5.73.0** (2022-07-29)
### Highlights
- [REST API] VDI import now also supports the raw format
- HTTPS server can acquire SSL certificate from Let's Encrypt (PR [#6320](https://github.com/vatesfr/xen-orchestra/pull/6320))
### Enhancements
- Embedded HTTP/HTTPS proxy is now enabled by default
- [VM] Display a confirmation modal when stopping/restarting a protected VM (PR [#6295](https://github.com/vatesfr/xen-orchestra/pull/6295))
### Bug fixes
- [Home/VM] Show error when deleting VMs failed (PR [#6323](https://github.com/vatesfr/xen-orchestra/pull/6323))
- [REST API] Fix broken VDI after VHD import [#6327](https://github.com/vatesfr/xen-orchestra/issues/6327) (PR [#6326](https://github.com/vatesfr/xen-orchestra/pull/6326))
- [Netbox] Fix `ipaddr: the address has neither IPv6 nor IPv4 format` error (PR [#6328](https://github.com/vatesfr/xen-orchestra/pull/6328))
### Released packages
- @vates/async-each 1.0.0
- @xen-orchestra/fs 2.0.0
- @xen-orchestra/backups 0.27.2
- @xen-orchestra/backups-cli 0.7.6
- @xen-orchestra/mixins 0.7.0
- @xen-orchestra/xapi 1.4.1
- @xen-orchestra/proxy 0.25.0
- vhd-cli 0.8.1
- vhd-lib 3.3.4
- xo-cli 0.14.1
- xo-server 5.100.0
- xo-web 5.101.0
## **5.72.1** (2022-07-11)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Enhancements
- [SR] When SR is in maintenance, add "Maintenance mode" badge next to its name (PR [#6313](https://github.com/vatesfr/xen-orchestra/pull/6313))
### Bug fixes
- [Tasks] Fix tasks not displayed when running CR backup job [Forum#6038](https://xcp-ng.org/forum/topic/6038/not-seeing-tasks-any-more-as-admin) (PR [#6315](https://github.com/vatesfr/xen-orchestra/pull/6315))
- [Backup] Fix failing merge multiple VHDs at once (PR [#6317](https://github.com/vatesfr/xen-orchestra/pull/6317))
- [VM/Console] Fix _Connect with SSH/RDP_ when address is IPv6
- [Audit] Ignore side-effects free API methods `xoa.check`, `xoa.clearCheckCache` and `xoa.getHVSupportedVersions`
### Released packages
- @xen-orchestra/backups 0.27.0
- @xen-orchestra/backups-cli 0.7.5
- @xen-orchestra/proxy 0.23.5
- vhd-lib 3.3.2
- xo-server 5.98.1
- xo-server-audit 0.10.0
- xo-web 5.100.0
## **5.72.0** (2022-06-30)
### Highlights
- [Backup] Merge delta backups without copying data when using VHD directories on NFS/SMB/local remote (PR [#6271](https://github.com/vatesfr/xen-orchestra/pull/6271))
- [Proxies] Ability to copy the proxy access URL (PR [#6287](https://github.com/vatesfr/xen-orchestra/pull/6287))
- [SR/Advanced] Ability to enable/disable _Maintenance Mode_ [#6215](https://github.com/vatesfr/xen-orchestra/issues/6215) (PRs [#6308](https://github.com/vatesfr/xen-orchestra/pull/6308), [#6297](https://github.com/vatesfr/xen-orchestra/pull/6297))
- [User] User tokens management through XO interface (PR [#6276](https://github.com/vatesfr/xen-orchestra/pull/6276))
- [Tasks, VM/General] Self Service users: show tasks related to their pools, hosts, SRs, networks and VMs (PR [#6217](https://github.com/vatesfr/xen-orchestra/pull/6217))
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup/Restore] Clearer error message when importing a VM backup requires XCP-ng/CH >= 8.1 (PR [#6304](https://github.com/vatesfr/xen-orchestra/pull/6304))
- [Backup] Users can use VHD directory on any remote type (PR [#6273](https://github.com/vatesfr/xen-orchestra/pull/6273))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [VDI Import] Fix `this._getOrWaitObject is not a function`
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
- [OVA Import] Fix import stuck after first disk
- [File restore] Ignore symbolic links
### Released packages
- @vates/event-listeners-manager 1.0.1
- @vates/read-chunk 1.0.0
- @xen-orchestra/backups 0.26.0
- @xen-orchestra/backups-cli 0.7.4
- xo-remote-parser 0.9.1
- @xen-orchestra/fs 1.1.0
- @xen-orchestra/openflow 0.1.2
- @xen-orchestra/xapi 1.4.0
- @xen-orchestra/proxy 0.23.4
- @xen-orchestra/proxy-cli 0.3.1
- vhd-lib 3.3.1
- vhd-cli 0.8.0
- xo-vmdk-to-vhd 2.4.2
- xo-server 5.98.0
- xo-web 5.99.0
## **5.71.1 (2022-06-13)**
### Enhancements
- Show raw errors to administrators instead of _unknown error from the peer_ (PR [#6260](https://github.com/vatesfr/xen-orchestra/pull/6260))
### Bug fixes
- [New SR] Fix `method.startsWith is not a function` when creating an _ext_ SR
- Import VDI content now works when there is an HTTP proxy between XO and the host (PR [#6261](https://github.com/vatesfr/xen-orchestra/pull/6261))
- [Backup] Fix `undefined is not iterable (cannot read property Symbol(Symbol.iterator))` on XS 7.0.0
- [Backup] Ensure a warning is shown if a target preparation step fails (PR [#6266](https://github.com/vatesfr/xen-orchestra/pull/6266))
- [OVA Export] Avoid creating a zombie task (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
- [OVA Export] Increase speed by lowering compression to acceptable level (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
- [OVA Export] Fix broken OVAs due to special characters in VM name (PR [#6267](https://github.com/vatesfr/xen-orchestra/pull/6267))
### Released packages
- @xen-orchestra/backups 0.25.0
- @xen-orchestra/backups-cli 0.7.3
- xen-api 1.2.1
- @xen-orchestra/xapi 1.2.0
- @xen-orchestra/proxy 0.23.2
- @xen-orchestra/proxy-cli 0.3.0
- xo-cli 0.14.0
- xo-vmdk-to-vhd 2.4.1
- xo-server 5.96.0
- xo-web 5.97.2
## **5.71.0 (2022-05-31)**
### Highlights
- [Backup] _Restore Health Check_ can now be configured to be run automatically during a backup schedule (PRs [#6227](https://github.com/vatesfr/xen-orchestra/pull/6227), [#6228](https://github.com/vatesfr/xen-orchestra/pull/6228), [#6238](https://github.com/vatesfr/xen-orchestra/pull/6238) & [#6242](https://github.com/vatesfr/xen-orchestra/pull/6242))
- [Backup] VMs with USB Pass-through devices are now supported! The advanced _Offline Snapshot Mode_ setting must be enabled. For Full Backup or Disaster Recovery jobs, Rolling Snapshot needs to be enabled as well. (PR [#6239](https://github.com/vatesfr/xen-orchestra/pull/6239))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [RPU/Host] If some backup jobs are running on the pool, ask for confirmation before starting an RPU, shutting down/rebooting a host or restarting a host's toolstack (PR [#6232](https://github.com/vatesfr/xen-orchestra/pull/6232))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
- [REST API] Support VDI creation via VHD import
### Enhancements
- [Backup] Merge multiple VHDs at once which will speed up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [VM migration] Ensure the VM can be migrated before performing the migration to avoid issues [#5301](https://github.com/vatesfr/xen-orchestra/issues/5301) (PR [#6245](https://github.com/vatesfr/xen-orchestra/pull/6245))
- [Backup] Show any detected errors on existing backups instead of fixing them silently (PR [#6225](https://github.com/vatesfr/xen-orchestra/pull/6225))
- Created SRs will now have auto-scan enabled similarly to what XenCenter does (PR [#6246](https://github.com/vatesfr/xen-orchestra/pull/6246))
- [RPU] Disable scheduled backup jobs during RPU (PR [#6244](https://github.com/vatesfr/xen-orchestra/pull/6244))
### Bug fixes
- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] Remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
- [VM/advanced] Fix various errors when adding ACLs [#6213](https://github.com/vatesfr/xen-orchestra/issues/6213) (PR [#6230](https://github.com/vatesfr/xen-orchestra/pull/6230))
- [Home/Self] Don't make VM's resource set name clickable for non admin users as they aren't allowed to view the Self Service page (PR [#6252](https://github.com/vatesfr/xen-orchestra/pull/6252))
- [load-balancer] Fix density mode failing to shut down hosts (PR [#6253](https://github.com/vatesfr/xen-orchestra/pull/6253))
- [Health] Make "Too many snapshots" table sortable by number of snapshots (PR [#6255](https://github.com/vatesfr/xen-orchestra/pull/6255))
- [Remote] Show complete errors instead of only a potentially missing message (PR [#6216](https://github.com/vatesfr/xen-orchestra/pull/6216))
### Released packages
- @xen-orchestra/self-signed 0.1.3
- vhd-lib 3.2.0
- @xen-orchestra/fs 1.0.3
- vhd-cli 0.7.2
- xo-vmdk-to-vhd 2.4.0
- @xen-orchestra/upload-ova 0.1.5
- @xen-orchestra/xapi 1.1.0
- @xen-orchestra/backups 0.24.0
- @xen-orchestra/backups-cli 0.7.2
- @xen-orchestra/emit-async 1.0.0
- @xen-orchestra/mixins 0.5.0
- @xen-orchestra/proxy 0.23.1
- xo-server 5.95.0
- xo-web 5.97.1
- xo-server-backup-reports 0.17.0
## 5.70.2 (2022-05-16)
### Bug fixes
@@ -263,6 +35,8 @@
## 5.70.0 (2022-04-29)
<img id="latest" src="https://badgen.net/badge/channel/latest/yellow" alt="Channel: latest" />
### Highlights
- [VM export] Feat export to `ova` format (PR [#6006](https://github.com/vatesfr/xen-orchestra/pull/6006))
@@ -299,6 +73,8 @@
## **5.69.2** (2022-04-13)
<img id="stable" src="https://badgen.net/badge/channel/stable/green" alt="Channel: stable" />
### Enhancements
- [Rolling Pool Update] New algorithm for XCP-ng updates (PR [#6188](https://github.com/vatesfr/xen-orchestra/pull/6188))

View File

@@ -7,28 +7,50 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Dashboard/Health] Detect broken VHD chains and display missing parent VDIs (PR [#6356](https://github.com/vatesfr/xen-orchestra/pull/6356))
- [Backup] Merge multiple VHDs at once which will speed up the merging phase after reducing the retention of a backup job (PR [#6184](https://github.com/vatesfr/xen-orchestra/pull/6184))
- [Backup] Implement file cache for listing the backups of a VM (PR [#6220](https://github.com/vatesfr/xen-orchestra/pull/6220))
- [Backup] Add setting `backups.metadata.defaultSettings.unconditionalSnapshot` in `xo-server`'s configuration file to force a snapshot even when not required by the backup, this is useful to avoid locking the VM halted during the backup (PR [#6221](https://github.com/vatesfr/xen-orchestra/pull/6221))
- [XO Web] Add ability to configure a default filter for Storage [#6236](https://github.com/vatesfr/xen-orchestra/issues/6236) (PR [#6237](https://github.com/vatesfr/xen-orchestra/pull/6237))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [S3] Fix S3 remote with empty directory not showing anything to restore (PR [#6218](https://github.com/vatesfr/xen-orchestra/pull/6218))
- [S3] Remote form did not save the `https` and `allow unauthorized` settings during remote creation (PR [#6219](https://github.com/vatesfr/xen-orchestra/pull/6219))
### Packages to release
> When modifying a package, add it here with its release type.
> Packages will be released in the order they are here, therefore, they should
> be listed by inverse order of dependency.
>
> The format is the following: - `$packageName` `$releaseType`
> Rule of thumb: add packages on top.
>
> Where `$releaseType` is
> The format is the following: - `$packageName` `$version`
>
> Where `$version` is
>
> - patch: if the change is a bug fix or a simple code improvement
> - minor: if the change is a new feature
> - major: if the change breaks compatibility
>
> Keep this list alphabetically ordered to avoid merge conflicts
> In case of conflict, the highest (lowest in previous list) `$version` wins.
>
> The `gen-deps-list` script can be used to generate this list of dependencies
> Run `scripts/gen-deps-list.js --help` for usage
<!--packages-start-->
- vhd-lib patch
- @xen-orchestra/fs patch
- vhd-cli patch
- xo-vmdk-to-vhd minor
- @xen-orchestra/upload-ova patch
- @xen-orchestra/backups minor
- @xen-orchestra/backups-cli patch
- @xen-orchestra/emit-async major
- @xen-orchestra/mixins minor
- @xen-orchestra/proxy minor
- xo-server minor
- xo-web minor

View File

@@ -4,11 +4,11 @@
We apply patches and fix security issues for the following versions:
| Version | Supported |
| --------------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| Version | Supported |
| ------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| anything else | :x: |
## Reporting a Vulnerability

View File

@@ -99,41 +99,3 @@ To solve this issue, we recommend that you:
- wait until the other backup job is completed/the merge process is done
- make sure your remote storage is not being overworked
## Error: HTTP connection has timed out
This error occurs when XO tries to fetch data from a host, via the HTTP GET method. This error essentially means that the host (dom0 specifically) isn't responding anymore, after we asked it to expose the disk to be exported. This could be a symptom of having an overloaded dom0 that couldn't respond fast enough. It can also be caused by dom0 having trouble attaching the disk in question to expose it for fetching via HTTP, or just not having enough resources to answer our GET request.
::: warning
As a temporary workaround you can increase the timeout beyond the default value, to allow the host more time to respond. But you will need to eventually diagnose the root cause of the slow host response or else you risk the issue returning.
:::
Create the following file:
```
/etc/xo-server/config.httpInactivityTimeout.toml
```
Add the following lines:
```
# XOA Support - Work-around HTTP timeout issue during backups
[xapiOptions]
httpInactivityTimeout = 1800000 # 30 mins
```
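After creating the file, restart `xo-server` so the new timeout is taken into account:

```
# systemctl restart xo-server.service
```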
## Error: Expected values to be strictly equal
This error occurs at the end of the transfer. XO checks the exported VM disk integrity, to ensure it's a valid VHD file (we check the VHD header as well as the footer of the received file). This error means the header and footer did not match, so the file is incomplete (likely the export from dom0 failed at some point and we only received a partial VHD/VM disk).
## Error: the job is already running
This means the same job is still running, typically from the last scheduled run. This happens when you have a backup job scheduled too often. It can also occur if you have a long timeout configured for the job, and a slow VM export or slow transfer to your remote. In either case, you need to adjust your backup schedule to allow time for the job to finish or timeout before the next scheduled run. We consider this an error to ensure you'll be notified that the planned schedule won't run this time because the previous one isn't finished.
## Error: VDI_IO_ERROR
This error comes directly from your host/dom0, and not XO. Essentially, XO asked the host to expose a VM disk to export via HTTP (as usual), XO managed to make the HTTP GET connection, and even start the transfer. But then at some point the host couldn't read the VM disk any further, causing this error on the host side. This might happen if the VDI is corrupted on the storage, or if there's a race condition during snapshots. More rarely, this can also occur if your SR is just too slow to keep up with the export as well as live VM traffic.
## Error: no XAPI associated to <UUID>
This message means that XO had a UUID of a VM to backup, but when the job ran it couldn't find any object matching it. This could be caused by the pool where this VM lived no longer being connected to XO. Double-check that the pool hosting the VM is currently connected under Settings > Servers. You can also search for the VM UUID in the Home > VMs search bar. If you can see it, run the backup job again and it will work. If you cannot, either the VM was removed or the pool is not connected.

View File

@@ -66,13 +66,12 @@ You shouldn't have to change this. It's the path where `xo-web` files are served
## Custom certificate authority
If you use certificates signed by an in-house CA for your XCP-ng or XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you can use the [`NODE_EXTRA_CA_CERTS`](https://nodejs.org/api/cli.html#cli_node_extra_ca_certs_file) environment variable.
If you use certificates signed by an in-house CA for your XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you need to add the `--use-openssl-ca` option in Node, but also add the CA to your trust store (`/etc/ssl/certs` via `update-ca-certificates` in your XOA).
To enable this option in your XOA, create `/etc/systemd/system/xo-server.service.d/ca.conf` with the following content:
To enable this option in your XOA, edit the `/etc/systemd/system/xo-server.service` file and add this:
```
[Service]
Environment=NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/my-cert.crt
Environment=NODE_OPTIONS=--use-openssl-ca
```
Don't forget to reload `systemd` conf and restart `xo-server`:
@@ -82,7 +81,9 @@ Don't forget to reload `systemd` conf and restart `xo-server`:
# systemctl restart xo-server.service
```
> For XO Proxy, the process is almost the same except the file to create is `/etc/systemd/system/xo-proxy.service.d/ca.conf` and the service to restart is `xo-proxy.service`.
:::tip
The `--use-openssl-ca` option is ignored by Node if Xen-Orchestra is run with Linux capabilities. Capabilities are commonly used to bind applications to privileged ports (<1024) (i.e. `CAP_NET_BIND_SERVICE`). Local NAT rules (`iptables`) or a reverse proxy would be required to use privileged ports and a custom certificate authority.
:::
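For the trust-store half mentioned above, a typical sequence on XOA looks like this (the certificate file name is illustrative):

```
# cp my-ca.crt /usr/local/share/ca-certificates/
# update-ca-certificates
# systemctl daemon-reload
# systemctl restart xo-server.service
```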
## Redis server

View File

@@ -18,8 +18,6 @@ If you lose your main pool, you can start the copy on the other side, with very
:::warning
It is normal that you can't boot the copied VM directly: we protect it. The normal workflow is to make a clone and then work on it.
This also affects VMs with "Auto Power On" enabled: thanks to this protection, they won't start on your CR destination if you happen to reboot it.
:::
## Configure it

View File

@@ -24,15 +24,16 @@ Please, do explain:
The best way to propose a change to the documentation or code is
to create a [GitHub pull request](https://help.github.com/articles/using-pull-requests/).
1. Fork the [Xen Orchestra repository](https://github.com/vatesfr/xen-orchestra) using the Fork button
2. Follow [the documentation](installation.md#from-the-sources) to install and run Xen Orchestra from the sources
3. Create a branch for your work
4. Edit the source files
5. Add a summary of your changes to `CHANGELOG.unreleased.md` if they do not relate to an existing changelog item, and update the list of packages that must be released to take your changes into account
6. [Create a pull request](https://github.com/vatesfr/xen-orchestra/compare) for this branch against the `master` branch
7. Push into the branch until the pull request is ready to merge
8. Avoid unnecessary merges: keep your branch up to date by regularly rebasing: `git rebase origin/master`
9. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
:::tip
Your pull request should always be against the `master` branch and not against `stable` which is the stable branch!
:::
1. Create a branch for your work
2. Add a summary of your changes to `CHANGELOG.md` under the `next` section if they do not relate to an existing changelog item
3. Create a pull request for this branch against the `master` branch
4. Push into the branch until the pull request is ready to merge
5. Avoid unnecessary merges: keep your branch up to date by regularly rebasing: `git rebase origin/master`
6. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
### Issue triage

View File

A higher retention number will consume a lot of space on your SR.
If you boot a copy of your production VM, be careful: if they share the same static IP, you'll run into trouble.
A good way to avoid this kind of problem is to remove the network interface on the DR VM and check if the export is correctly done.
:::warning
For each DR replicated VM, we add "start" as a blocked operation, meaning even VMs with "Auto power on" enabled will not be started on your DR destination if it reboots.
:::

View File

@@ -141,29 +141,6 @@ curl \
> myDisk.vhd
```
## VDI Import
A VHD or a raw export can be imported on an SR to create a new VDI at `/rest/v0/srs/<sr uuid>/vdis`.
```bash
curl \
-X POST \
-b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
-T myDisk.raw \
'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?raw&name_label=my_imported_VDI' \
| cat
```
> Note: the final `| cat` ensures cURL's standard output is not a TTY, which is necessary for upload stats to be displayed.
This request returns the UUID of the created VDI.
The following query parameters are supported to customize the created VDI:
- `name_label`
- `name_description`
- `raw`: this parameter must be used if importing a raw export instead of a VHD
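For example, importing a VHD instead of a raw disk simply drops the `raw` flag; the token and SR UUID below are the same placeholders as in the raw example above:

```bash
curl \
  -X POST \
  -b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
  -T myDisk.vhd \
  'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
  | cat
```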
## The future
We are adding features and improving the REST API step by step. If you have interesting use cases or feedback, please ask directly at <https://xcp-ng.org/forum/category/12/xen-orchestra>

View File

@@ -15,7 +15,7 @@ Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.
:::warning
As VxLAN and GRE are protocols using extra encapsulation, they require extra bits on a network packet. If you create a Global Private Network with a default MTU at `1500`, you won't be able to use it "as is" in your VMs, unless you configure a smaller MTU for each virtual interface, in your VM operating system (eg: `1400`).
If you want something entirely transparent for your VMs, then you'll need to create a network with a MTU of `1546` for GRE or `1550` for VxLAN. However, larger MTU will require capable network equipment.
:::
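For instance, on a Linux guest attached to a default 1500 MTU private network, lowering the interface MTU is enough (the interface name is illustrative):

```
ip link set dev eth0 mtu 1400
```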
### Network creation

View File

@@ -206,23 +206,6 @@ In any case, if you lose your password, you can reset the database and get the d
You can verify that your time is correctly set with the `date` command. To set XOA to your current timezone, use `sudo dpkg-reconfigure tzdata`.
## Setting a custom NTP server
By default, XOA is configured to use the standard Debian NTP servers:
```
pool 0.debian.pool.ntp.org iburst
pool 1.debian.pool.ntp.org iburst
pool 2.debian.pool.ntp.org iburst
pool 3.debian.pool.ntp.org iburst
```
If you'd like to use your own NTP server or another pool, you can make the changes directly in `/etc/ntp.conf`.
You will need to be root to edit this file (or use `sudo`). We recommend adding your custom server to the top of the list, leaving the Debian server entries in place if possible.
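For instance, to prefer an internal server while keeping the Debian pools as fallback (`ntp.example.org` is a placeholder):

```
server ntp.example.org iburst
pool 0.debian.pool.ntp.org iburst
pool 1.debian.pool.ntp.org iburst
pool 2.debian.pool.ntp.org iburst
pool 3.debian.pool.ntp.org iburst
```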
For changes to take effect, you will need to restart NTP: `sudo systemctl restart ntp.service`.
## Restart the service
You can restart Xen Orchestra by accessing XOA via SSH (or console) and running `systemctl restart xo-server.service`.

Some files were not shown because too many files have changed in this diff.