Compare commits
49 Commits
xo-web-v5.
...
@xen-orche
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
62a8b94221 | ||
|
|
21faaeb33d | ||
|
|
0525fc5909 | ||
|
|
a1a53bb285 | ||
|
|
0c453c4415 | ||
|
|
d0406f9736 | ||
|
|
ba74b8603d | ||
|
|
c675a4d61d | ||
|
|
965c45bc70 | ||
|
|
139a22602a | ||
|
|
e0e4969198 | ||
|
|
08d69d95b3 | ||
|
|
4e6c507ba9 | ||
|
|
fd06374365 | ||
|
|
a07ebc636a | ||
|
|
4c151ac9aa | ||
|
|
05c425698f | ||
|
|
2a961979e6 | ||
|
|
211ede92cc | ||
|
|
256af03772 | ||
|
|
654fd5a4f9 | ||
|
|
541d90e49f | ||
|
|
974e7038e7 | ||
|
|
e2f5b30aa9 | ||
|
|
3483e7d9e0 | ||
|
|
56cb20a1af | ||
|
|
64929653dd | ||
|
|
c955da9bc6 | ||
|
|
291354fa8e | ||
|
|
905d736512 | ||
|
|
3406d6e2a9 | ||
|
|
fc10b5ffb9 | ||
|
|
f89c313166 | ||
|
|
7c734168d0 | ||
|
|
1e7bfec2ce | ||
|
|
1eb0603b4e | ||
|
|
4b32730ce8 | ||
|
|
ad083c1d9b | ||
|
|
b4f84c2de2 | ||
|
|
fc17443ce4 | ||
|
|
342ae06b21 | ||
|
|
093fb7f959 | ||
|
|
f6472424ad | ||
|
|
31ed3767c6 | ||
|
|
366acb65ea | ||
|
|
7c6946931b | ||
|
|
5d971433a5 | ||
|
|
05264b326b | ||
|
|
fdd5c6bfd8 |
@@ -21,7 +21,7 @@ module.exports = {
|
||||
|
||||
overrides: [
|
||||
{
|
||||
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
|
||||
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
|
||||
rules: {
|
||||
'no-console': 'off',
|
||||
},
|
||||
@@ -40,6 +40,13 @@ module.exports = {
|
||||
|
||||
'react/jsx-handler-names': 'off',
|
||||
|
||||
// disabled because not always relevant, we might reconsider in the future
|
||||
//
|
||||
// enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
|
||||
//
|
||||
// example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
|
||||
'lines-between-class-members': 'off',
|
||||
|
||||
'no-console': ['error', { allow: ['warn', 'error'] }],
|
||||
'no-var': 'error',
|
||||
'node/no-extraneous-import': 'error',
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
378
@xen-orchestra/backups-cli/index.js
Executable file
378
@xen-orchestra/backups-cli/index.js
Executable file
@@ -0,0 +1,378 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const args = process.argv.slice(2)
|
||||
|
||||
if (
|
||||
args.length === 0 ||
|
||||
/^(?:-h|--help)$/.test(args[0]) ||
|
||||
args[0] !== 'clean-vms'
|
||||
) {
|
||||
console.log('Usage: xo-backups clean-vms [--force] xo-vm-backups/*')
|
||||
// eslint-disable-next-line no-process-exit
|
||||
return process.exit(1)
|
||||
}
|
||||
|
||||
// remove `clean-vms` arg which is the only available command ATM
|
||||
args.splice(0, 1)
|
||||
|
||||
// only act (ie delete files) if `--force` is present
|
||||
const force = args[0] === '--force'
|
||||
if (force) {
|
||||
args.splice(0, 1)
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const assert = require('assert')
|
||||
const lockfile = require('proper-lockfile')
|
||||
const { default: Vhd } = require('vhd-lib')
|
||||
const { curryRight, flatten } = require('lodash')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
|
||||
const { pipe, promisifyAll } = require('promise-toolbox')
|
||||
|
||||
const fs = promisifyAll(require('fs'))
|
||||
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const asyncMap = curryRight((iterable, fn) =>
|
||||
Promise.all(
|
||||
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
|
||||
)
|
||||
)
|
||||
|
||||
const filter = (...args) => thisArg => thisArg.filter(...args)
|
||||
|
||||
// TODO: better check?
|
||||
|
||||
// our heuristic is not good enough, there has been some false positives
|
||||
// (detected as invalid by us but valid by `tar` and imported with success),
|
||||
// either:
|
||||
// - these files were normal but the check is incorrect
|
||||
// - these files were invalid but without data loss
|
||||
// - these files were invalid but with silent data loss
|
||||
//
|
||||
// FIXME: the heuristic does not work if the XVA is compressed, we need to
|
||||
// implement a specific test for it
|
||||
//
|
||||
// maybe reading the end of the file looking for a file named
|
||||
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
|
||||
//
|
||||
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
|
||||
const isValidTar = async path => {
|
||||
try {
|
||||
const fd = await fs.open(path, 'r')
|
||||
try {
|
||||
const { size } = await fs.fstat(fd)
|
||||
if (size <= 1024 || size % 512 !== 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
const buf = Buffer.allocUnsafe(1024)
|
||||
assert.strictEqual(
|
||||
await fs.read(fd, buf, 0, buf.length, size - buf.length),
|
||||
buf.length
|
||||
)
|
||||
return buf.every(_ => _ === 0)
|
||||
} finally {
|
||||
fs.close(fd).catch(noop)
|
||||
}
|
||||
} catch (error) {
|
||||
// never throw, log and report as valid to avoid side effects
|
||||
console.error('isValidTar', path, error)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const readDir = path =>
|
||||
fs.readdir(path).then(
|
||||
entries => {
|
||||
entries.forEach((entry, i) => {
|
||||
entries[i] = `${path}/${entry}`
|
||||
})
|
||||
|
||||
return entries
|
||||
},
|
||||
error => {
|
||||
// a missing dir is by definition empty
|
||||
if (error != null && error.code === 'ENOENT') {
|
||||
return []
|
||||
}
|
||||
throw error
|
||||
}
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will deleted
|
||||
async function mergeVhdChain(chain) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
const child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
console.warn('Unused parents of VHD', child)
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
console.warn(' ', parent)
|
||||
})
|
||||
force && console.warn(' merging…')
|
||||
console.warn('')
|
||||
if (force) {
|
||||
// `mergeVhd` does not work with a stream, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
return console.warn('TODO: implement merge')
|
||||
// await mergeVhd(
|
||||
// handler,
|
||||
// parent,
|
||||
// handler,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children)
|
||||
// )
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
force && fs.rename(parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
console.warn('Unused VHD', child)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(child)
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
const listVhds = pipe([
|
||||
vmDir => vmDir + '/vdis',
|
||||
readDir,
|
||||
asyncMap(readDir),
|
||||
flatten,
|
||||
asyncMap(readDir),
|
||||
flatten,
|
||||
filter(_ => _.endsWith('.vhd')),
|
||||
])
|
||||
|
||||
async function handleVm(vmDir) {
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(await listVhds(vmDir), async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
|
||||
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error(
|
||||
'this script does not support multiple VHD children'
|
||||
)
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Error while checking VHD', path)
|
||||
console.warn(' ', error)
|
||||
if (error != null && error.code === 'ERR_ASSERTION') {
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(path))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhd => {
|
||||
const parent = vhdParents[vhd]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhd]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhd)
|
||||
|
||||
console.warn('Error while checking VHD', vhd)
|
||||
console.warn(' missing parent', parent)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && deletions.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const [jsons, xvas] = await readDir(vmDir).then(entries => [
|
||||
entries.filter(_ => _.endsWith('.json')),
|
||||
new Set(entries.filter(_ => _.endsWith('.xva'))),
|
||||
])
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await isValidTar(path))) {
|
||||
console.warn('Potential broken XVA', path)
|
||||
console.warn('')
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// reference a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await fs.readFile(json))
|
||||
const { mode } = metadata
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve(vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
console.warn(' missing file', linkedXva)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
|
||||
})()
|
||||
|
||||
// FIXME: find better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
|
||||
console.warn(
|
||||
' %i/%i missing VHDs',
|
||||
missingVhds.length,
|
||||
linkedVhds.length
|
||||
)
|
||||
missingVhds.forEach(vhd => {
|
||||
console.warn(' ', vhd)
|
||||
})
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: parallelize by vm/job/vdi
|
||||
const unusedVhdsDeletion = []
|
||||
{
|
||||
// VHD chains (as list from child to ancestor) to merge indexed by last
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
const toCheck = new Set(unusedVhds)
|
||||
|
||||
const getUsedChildChainOrDelete = vhd => {
|
||||
if (vhd in vhdChainsToMerge) {
|
||||
const chain = vhdChainsToMerge[vhd]
|
||||
delete vhdChainsToMerge[vhd]
|
||||
return chain
|
||||
}
|
||||
|
||||
if (!unusedVhds.has(vhd)) {
|
||||
return [vhd]
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
toCheck.delete(vhd)
|
||||
|
||||
const child = vhdChildren[vhd]
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
console.warn('Unused VHD', vhd)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && unusedVhdsDeletion.push(handler.unlink(vhd))
|
||||
}
|
||||
|
||||
toCheck.forEach(vhd => {
|
||||
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
|
||||
})
|
||||
|
||||
Object.keys(vhdChainsToMerge).forEach(key => {
|
||||
const chain = vhdChainsToMerge[key]
|
||||
if (chain !== undefined) {
|
||||
unusedVhdsDeletion.push(mergeVhdChain(chain))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
unusedVhdsDeletion,
|
||||
asyncMap(unusedXvas, path => {
|
||||
console.warn('Unused XVA', path)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(path)
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
asyncMap(args, async vmDir => {
|
||||
vmDir = resolve(vmDir)
|
||||
|
||||
// TODO: implement this in `xo-server`, not easy because not compatible with
|
||||
// `@xen-orchestra/fs`.
|
||||
const release = await lockfile.lock(vmDir)
|
||||
try {
|
||||
await handleVm(vmDir)
|
||||
} catch (error) {
|
||||
console.error('handleVm', vmDir, error)
|
||||
} finally {
|
||||
await release()
|
||||
}
|
||||
}).catch(error => console.error('main', error))
|
||||
27
@xen-orchestra/backups-cli/package.json
Normal file
27
@xen-orchestra/backups-cli/package.json
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"bin": {
|
||||
"xo-backups": "index.js"
|
||||
},
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"lodash": "^4.17.15",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"proper-lockfile": "^4.1.1",
|
||||
"vhd-lib": "^0.7.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=7.10.1"
|
||||
},
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
|
||||
"name": "@xen-orchestra/backups-cli",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/backups-cli",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"version": "0.0.0"
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/cron",
|
||||
"version": "1.0.4",
|
||||
"version": "1.0.5",
|
||||
"license": "ISC",
|
||||
"description": "Focused, well maintained, cron parser/scheduler",
|
||||
"keywords": [
|
||||
@@ -46,7 +46,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -5,14 +5,21 @@ import parse from './parse'
|
||||
|
||||
const MAX_DELAY = 2 ** 31 - 1
|
||||
|
||||
function nextDelay(schedule) {
|
||||
const now = schedule._createDate()
|
||||
return next(schedule._schedule, now) - now
|
||||
}
|
||||
|
||||
class Job {
|
||||
constructor(schedule, fn) {
|
||||
let scheduledDate
|
||||
const wrapper = () => {
|
||||
const now = Date.now()
|
||||
if (scheduledDate > now) {
|
||||
// we're early, delay
|
||||
//
|
||||
// no need to check _isEnabled, we're just delaying the existing timeout
|
||||
//
|
||||
// see https://github.com/vatesfr/xen-orchestra/issues/4625
|
||||
this._timeout = setTimeout(wrapper, scheduledDate - now)
|
||||
return
|
||||
}
|
||||
|
||||
this._isRunning = true
|
||||
|
||||
let result
|
||||
@@ -32,7 +39,9 @@ class Job {
|
||||
this._isRunning = false
|
||||
|
||||
if (this._isEnabled) {
|
||||
const delay = nextDelay(schedule)
|
||||
const now = Date.now()
|
||||
scheduledDate = +schedule._createDate()
|
||||
const delay = scheduledDate - now
|
||||
this._timeout =
|
||||
delay < MAX_DELAY
|
||||
? setTimeout(wrapper, delay)
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
"get-stream": "^4.0.0",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.1.0",
|
||||
@@ -46,7 +46,7 @@
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"dotenv": "^8.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
|
||||
@@ -31,14 +31,14 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
"promise-toolbox": "^0.14.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-dev": "^1.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
44
CHANGELOG.md
44
CHANGELOG.md
@@ -4,20 +4,50 @@
|
||||
|
||||
### Enhancements
|
||||
|
||||
### Bug fixes
|
||||
|
||||
### Released packages
|
||||
|
||||
|
||||
## **5.39.1** (2019-10-11)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
|
||||
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-web v5.50.3
|
||||
|
||||
|
||||
## **5.39.0** (2019-09-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
|
||||
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
|
||||
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))
|
||||
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SR/new] Clarify address formats [#4450](https://github.com/vatesfr/xen-orchestra/issues/4450) (PR [#4460](https://github.com/vatesfr/xen-orchestra/pull/4460))
|
||||
- [Backup NG/New] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4411](https://github.com/vatesfr/xen-orchestra/pull/4411))
|
||||
- [VM/disks] Don't hide disks that are attached to the same VM twice [#4400](https://github.com/vatesfr/xen-orchestra/issues/4400) (PR [#4414](https://github.com/vatesfr/xen-orchestra/pull/4414))
|
||||
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
|
||||
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
|
||||
- [SDN Controller] Ability to configure MTU for private networks (PR [#4491](https://github.com/vatesfr/xen-orchestra/pull/4491))
|
||||
- [VM Export] Filenames are now prefixed with datetime [#4503](https://github.com/vatesfr/xen-orchestra/issues/4503)
|
||||
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))
|
||||
- [Settings/Logs] Differenciate XS/XCP-ng errors from XO errors [#4101](https://github.com/vatesfr/xen-orchestra/issues/4101) (PR [#4385](https://github.com/vatesfr/xen-orchestra/pull/4385))
|
||||
- [Backups] Improve performance by caching logs consolidation (PR [#4541](https://github.com/vatesfr/xen-orchestra/pull/4541))
|
||||
- [New VM] Cloud Init available for all plans (PR [#4543](https://github.com/vatesfr/xen-orchestra/pull/4543))
|
||||
- [Servers] IPv6 addresses can be used [#4520](https://github.com/vatesfr/xen-orchestra/issues/4520) (PR [#4521](https://github.com/vatesfr/xen-orchestra/pull/4521)) \
|
||||
Note: They must enclosed in brackets to differentiate with the port, e.g.: `[2001:db8::7334]` or `[ 2001:db8::7334]:4343`
|
||||
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
@@ -47,12 +77,12 @@
|
||||
- xo-server-sdn-controller v0.3.0
|
||||
- @xen-orchestra/template v0.1.0
|
||||
- xo-server v5.50.1
|
||||
- xo-web v5.50.1
|
||||
- xo-web v5.50.2
|
||||
|
||||
|
||||
## **5.38.0** (2019-08-29)
|
||||
|
||||

|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
@@ -80,8 +110,6 @@
|
||||
|
||||
## **5.37.1** (2019-08-06)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
|
||||
|
||||
@@ -3,16 +3,29 @@
|
||||
> Keep in mind the changelog is addressed to **users** and should be
|
||||
> understandable by them.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `xo-server` requires Node 8.
|
||||
|
||||
### Enhancements
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
|
||||
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
|
||||
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
|
||||
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
|
||||
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
|
||||
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
|
||||
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [Hub] Fix responsive hub VMs [#4557](https://github.com/vatesfr/xen-orchestra/issues/4557) (PR [#4558](https://github.com/vatesfr/xen-orchestra/pull/4558))
|
||||
- [Hub/resource] Fix icon remove button (PR [#4559](https://github.com/vatesfr/xen-orchestra/pull/4559))
|
||||
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
|
||||
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
|
||||
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
|
||||
|
||||
### Released packages
|
||||
|
||||
@@ -21,5 +34,12 @@
|
||||
>
|
||||
> Rule of thumb: add packages on top.
|
||||
|
||||
- @xen-orchestra/cron v1.0.5
|
||||
- xo-server-transport-icinga2 v0.1.0
|
||||
- xo-server-sdn-controller v0.3.1
|
||||
- xo-server v5.51.0
|
||||
- xo-web v5.51.0
|
||||
|
||||
### Dropped packages
|
||||
|
||||
- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment
|
||||
|
||||
BIN
docs/assets/release-channels.png
Normal file
BIN
docs/assets/release-channels.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 99 KiB |
@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
|
||||
|
||||
```
|
||||
$ node -v
|
||||
v8.12.0
|
||||
v8.16.2
|
||||
```
|
||||
|
||||
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.
|
||||
|
||||
@@ -41,6 +41,20 @@ However, if you want to start a manual check, you can do it by clicking on the "
|
||||
|
||||

|
||||
|
||||
#### Release channel
|
||||
In Xen Orchestra, you can make a choice between two different release channels.
|
||||
|
||||
##### Stable
|
||||
The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).
|
||||
|
||||
##### Latest
|
||||
|
||||
The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once deployed in vastly varying environments, such as our user base has.
|
||||
|
||||
> To select the release channel of your choice, go to the XOA > Updates view.
|
||||
|
||||

|
||||
|
||||
#### Upgrade
|
||||
|
||||
If a new version is found, you'll have an upgrade button and its tooltip displayed:
|
||||
|
||||
@@ -12,18 +12,18 @@
|
||||
"eslint-config-standard-jsx": "^8.1.0",
|
||||
"eslint-plugin-eslint-comments": "^3.1.1",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-node": "^9.0.1",
|
||||
"eslint-plugin-node": "^10.0.0",
|
||||
"eslint-plugin-promise": "^4.0.0",
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.106.3",
|
||||
"flow-bin": "^0.109.0",
|
||||
"globby": "^10.0.0",
|
||||
"husky": "^3.0.0",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"sorted-object": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
|
||||
@@ -35,7 +35,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.1",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -39,10 +39,10 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"execa": "^2.0.2",
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"rimraf": "^3.0.0",
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
|
||||
@@ -21,12 +21,13 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
@@ -37,7 +38,7 @@
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"execa": "^2.0.2",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import resolveRelativeFromFile from './_resolveRelativeFromFile'
|
||||
|
||||
@@ -13,18 +14,23 @@ import {
|
||||
import { fuFooter, fuHeader, checksumStruct } from './_structs'
|
||||
import { test as mapTestBit } from './_bitmap'
|
||||
|
||||
export default async function createSyntheticStream(handler, path) {
|
||||
const { warn } = createLogger('vhd-lib:createSyntheticStream')
|
||||
|
||||
export default async function createSyntheticStream(handler, paths) {
|
||||
const fds = []
|
||||
const cleanup = () => {
|
||||
for (let i = 0, n = fds.length; i < n; ++i) {
|
||||
handler.closeFile(fds[i]).catch(error => {
|
||||
console.warn('createReadStream, closeFd', i, error)
|
||||
warn('error while closing file', {
|
||||
error,
|
||||
fd: fds[i],
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
try {
|
||||
const vhds = []
|
||||
while (true) {
|
||||
const open = async path => {
|
||||
const fd = await handler.openFile(path, 'r')
|
||||
fds.push(fd)
|
||||
const vhd = new Vhd(handler, fd)
|
||||
@@ -32,11 +38,18 @@ export default async function createSyntheticStream(handler, path) {
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockAllocationTable()
|
||||
|
||||
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
|
||||
break
|
||||
return vhd
|
||||
}
|
||||
if (typeof paths === 'string') {
|
||||
let path = paths
|
||||
let vhd
|
||||
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
} else {
|
||||
for (const path of paths) {
|
||||
await open(path)
|
||||
}
|
||||
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
const nVhds = vhds.length
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import assert from 'assert'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
@@ -15,10 +16,7 @@ import {
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
const { debug } = createLogger('vhd-lib:Vhd')
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
"@babel/core": "^7.1.5",
|
||||
"@babel/preset-env": "^7.1.5",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"cross-env": "^5.1.4",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
"make-error": "^1.3.0",
|
||||
"minimist": "^1.2.0",
|
||||
"ms": "^2.1.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"pw": "0.0.4",
|
||||
"xmlrpc": "^1.3.2",
|
||||
"xo-collection": "^0.4.1"
|
||||
@@ -60,7 +60,7 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
|
||||
import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { filter, find } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
@@ -110,7 +110,7 @@ const main = async args => {
|
||||
asCallback.call(
|
||||
fromCallback(cb => {
|
||||
evaluate.call(repl, cmd, context, filename, cb)
|
||||
}).then(value => (isArray(value) ? Promise.all(value) : value)),
|
||||
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
|
||||
cb
|
||||
)
|
||||
})(repl.eval)
|
||||
|
||||
@@ -4,7 +4,7 @@ import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import httpRequest from 'http-request-plus'
|
||||
import { EventEmitter } from 'events'
|
||||
import { isArray, map, noop, omit } from 'lodash'
|
||||
import { map, noop, omit } from 'lodash'
|
||||
import {
|
||||
cancelable,
|
||||
defer,
|
||||
@@ -113,7 +113,7 @@ export class Xapi extends EventEmitter {
|
||||
this._watchedTypes = undefined
|
||||
const { watchEvents } = opts
|
||||
if (watchEvents !== false) {
|
||||
if (isArray(watchEvents)) {
|
||||
if (Array.isArray(watchEvents)) {
|
||||
this._watchedTypes = watchEvents
|
||||
}
|
||||
this.watchEvents()
|
||||
@@ -1075,7 +1075,7 @@ export class Xapi extends EventEmitter {
|
||||
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
|
||||
|
||||
const value = data[field]
|
||||
if (isArray(value)) {
|
||||
if (Array.isArray(value)) {
|
||||
if (value.length === 0 || isOpaqueRef(value[0])) {
|
||||
getters[$field] = function() {
|
||||
const value = this[field]
|
||||
|
||||
@@ -38,16 +38,16 @@
|
||||
"human-format": "^0.10.0",
|
||||
"l33teral": "^3.0.3",
|
||||
"lodash": "^4.17.4",
|
||||
"micromatch": "^3.1.3",
|
||||
"micromatch": "^4.0.2",
|
||||
"mkdirp": "^0.5.1",
|
||||
"nice-pipe": "0.0.0",
|
||||
"pretty-ms": "^4.0.0",
|
||||
"pretty-ms": "^5.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"pump": "^3.0.0",
|
||||
"pw": "^0.0.4",
|
||||
"strip-indent": "^2.0.0",
|
||||
"xdg-basedir": "^3.0.0",
|
||||
"strip-indent": "^3.0.0",
|
||||
"xdg-basedir": "^4.0.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -56,7 +56,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -7,7 +7,6 @@ const promisify = require('bluebird').promisify
|
||||
const readFile = promisify(require('fs').readFile)
|
||||
const writeFile = promisify(require('fs').writeFile)
|
||||
|
||||
const assign = require('lodash/assign')
|
||||
const l33t = require('l33teral')
|
||||
const mkdirp = promisify(require('mkdirp'))
|
||||
const xdgBasedir = require('xdg-basedir')
|
||||
@@ -41,7 +40,7 @@ const save = (exports.save = function(config) {
|
||||
|
||||
exports.set = function(data) {
|
||||
return load().then(function(config) {
|
||||
return save(assign(config, data))
|
||||
return save(Object.assign(config, data))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,6 @@ const getKeys = require('lodash/keys')
|
||||
const hrp = require('http-request-plus').default
|
||||
const humanFormat = require('human-format')
|
||||
const identity = require('lodash/identity')
|
||||
const isArray = require('lodash/isArray')
|
||||
const isObject = require('lodash/isObject')
|
||||
const micromatch = require('micromatch')
|
||||
const nicePipe = require('nice-pipe')
|
||||
@@ -298,7 +297,11 @@ async function listCommands(args) {
|
||||
str.push(
|
||||
name,
|
||||
'=<',
|
||||
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
|
||||
type == null
|
||||
? 'unknown type'
|
||||
: Array.isArray(type)
|
||||
? type.join('|')
|
||||
: type,
|
||||
'>'
|
||||
)
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { BaseError } from 'make-error'
|
||||
import { isArray, iteratee } from 'lodash'
|
||||
import { iteratee } from 'lodash'
|
||||
|
||||
class XoError extends BaseError {
|
||||
constructor({ code, message, data }) {
|
||||
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
|
||||
}))
|
||||
|
||||
export const invalidParameters = create(10, (message, errors) => {
|
||||
if (isArray(message)) {
|
||||
if (Array.isArray(message)) {
|
||||
errors = message
|
||||
message = undefined
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"deep-freeze": "^0.0.1",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -39,14 +39,14 @@
|
||||
"inquirer": "^7.0.0",
|
||||
"ldapjs": "^1.0.1",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
"promise-toolbox": "^0.14.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -354,7 +354,7 @@ class BackupReportsXoPlugin {
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
success: log.status === 'success',
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
|
||||
@@ -390,7 +390,7 @@ class BackupReportsXoPlugin {
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: 2,
|
||||
success: false,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
})
|
||||
}
|
||||
@@ -646,7 +646,7 @@ class BackupReportsXoPlugin {
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
success: log.status === 'success',
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
|
||||
@@ -656,7 +656,7 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
|
||||
_sendReport({ markdown, subject, success, nagiosMarkdown }) {
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
@@ -676,9 +676,14 @@ class BackupReportsXoPlugin {
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
status: nagiosStatus,
|
||||
status: success ? 0 : 2,
|
||||
message: nagiosMarkdown,
|
||||
}),
|
||||
xo.sendIcinga2Status !== undefined &&
|
||||
xo.sendIcinga2Status({
|
||||
status: success ? 'OK' : 'CRITICAL',
|
||||
message: markdown,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
@@ -708,7 +713,7 @@ class BackupReportsXoPlugin {
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
nagiosStatus: 2,
|
||||
success: false,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
|
||||
})
|
||||
}
|
||||
@@ -904,7 +909,7 @@ class BackupReportsXoPlugin {
|
||||
? ICON_FAILURE
|
||||
: ICON_SKIPPED
|
||||
}`,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
success: globalSuccess,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
@@ -1,54 +0,0 @@
|
||||
{
|
||||
"name": "xo-server-cloud",
|
||||
"version": "0.3.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
"cloud",
|
||||
"orchestra",
|
||||
"plugin",
|
||||
"xen",
|
||||
"xen-orchestra",
|
||||
"xo-server"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-cloud",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Pierre Donias",
|
||||
"email": "pierre.donias@gmail.com"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"http-request-plus": "^0.8.0",
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
@@ -1,208 +0,0 @@
|
||||
import Client, { createBackoff } from 'jsonrpc-websocket-client'
|
||||
import hrp from 'http-request-plus'
|
||||
|
||||
const WS_URL = 'ws://localhost:9001'
|
||||
const HTTP_URL = 'http://localhost:9002'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class XoServerCloud {
|
||||
constructor({ xo }) {
|
||||
this._xo = xo
|
||||
|
||||
// Defined in configure().
|
||||
this._conf = null
|
||||
this._key = null
|
||||
}
|
||||
|
||||
configure(configuration) {
|
||||
this._conf = configuration
|
||||
}
|
||||
|
||||
async load() {
|
||||
const getResourceCatalog = this._getCatalog.bind(this)
|
||||
getResourceCatalog.description =
|
||||
"Get the list of user's available resources"
|
||||
getResourceCatalog.permission = 'admin'
|
||||
getResourceCatalog.params = {
|
||||
filters: { type: 'object', optional: true },
|
||||
}
|
||||
|
||||
const registerResource = ({ namespace }) =>
|
||||
this._registerResource(namespace)
|
||||
registerResource.description = 'Register a resource via cloud plugin'
|
||||
registerResource.params = {
|
||||
namespace: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
registerResource.permission = 'admin'
|
||||
|
||||
const downloadAndInstallResource = this._downloadAndInstallResource.bind(
|
||||
this
|
||||
)
|
||||
|
||||
downloadAndInstallResource.description =
|
||||
'Download and install a resource via cloud plugin'
|
||||
|
||||
downloadAndInstallResource.params = {
|
||||
id: { type: 'string' },
|
||||
namespace: { type: 'string' },
|
||||
version: { type: 'string' },
|
||||
sr: { type: 'string' },
|
||||
}
|
||||
|
||||
downloadAndInstallResource.resolve = {
|
||||
sr: ['sr', 'SR', 'administrate'],
|
||||
}
|
||||
|
||||
downloadAndInstallResource.permission = 'admin'
|
||||
|
||||
this._unsetApiMethods = this._xo.addApiMethods({
|
||||
cloud: {
|
||||
downloadAndInstallResource,
|
||||
getResourceCatalog,
|
||||
registerResource,
|
||||
},
|
||||
})
|
||||
this._unsetRequestResource = this._xo.defineProperty(
|
||||
'requestResource',
|
||||
this._requestResource,
|
||||
this
|
||||
)
|
||||
|
||||
const updater = (this._updater = new Client(WS_URL))
|
||||
const connect = () =>
|
||||
updater.open(createBackoff()).catch(error => {
|
||||
console.error('xo-server-cloud: fail to connect to updater', error)
|
||||
|
||||
return connect()
|
||||
})
|
||||
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
|
||||
console.warn('xo-server-cloud: next attempt in %s ms', delay)
|
||||
})
|
||||
connect()
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._unsetApiMethods()
|
||||
this._unsetRequestResource()
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getCatalog({ filters } = {}) {
|
||||
const catalog = await this._updater.call('getResourceCatalog', { filters })
|
||||
|
||||
if (!catalog) {
|
||||
throw new Error('cannot get catalog')
|
||||
}
|
||||
|
||||
return catalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaces() {
|
||||
const catalog = await this._getCatalog()
|
||||
|
||||
if (!catalog._namespaces) {
|
||||
throw new Error('cannot get namespaces')
|
||||
}
|
||||
|
||||
return catalog._namespaces
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _downloadAndInstallResource({ id, namespace, sr, version }) {
|
||||
const stream = await this._requestResource({
|
||||
hub: true,
|
||||
id,
|
||||
namespace,
|
||||
version,
|
||||
})
|
||||
const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
|
||||
srId: sr.id,
|
||||
type: 'xva',
|
||||
})
|
||||
await vm.update_other_config({
|
||||
'xo:resource:namespace': namespace,
|
||||
'xo:resource:xva:version': version,
|
||||
'xo:resource:xva:id': id,
|
||||
})
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _registerResource(namespace) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (_namespace === undefined) {
|
||||
throw new Error(`${namespace} is not available`)
|
||||
}
|
||||
|
||||
if (_namespace.registered || _namespace.pending) {
|
||||
throw new Error(`already registered for ${namespace}`)
|
||||
}
|
||||
|
||||
return this._updater.call('registerResource', { namespace })
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaceCatalog({ hub, namespace }) {
|
||||
const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
|
||||
namespace
|
||||
]
|
||||
|
||||
if (!namespaceCatalog) {
|
||||
throw new Error(`cannot get catalog: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
return namespaceCatalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _requestResource({ hub = false, id, namespace, version }) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (!hub && (!_namespace || !_namespace.registered)) {
|
||||
throw new Error(`cannot get resource: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
const { _token: token } = await this._getNamespaceCatalog({
|
||||
hub,
|
||||
namespace,
|
||||
})
|
||||
|
||||
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
|
||||
if (token === undefined) {
|
||||
throw new Error(`${namespace} namespace token is undefined`)
|
||||
}
|
||||
|
||||
const downloadToken = await this._updater.call('getResourceDownloadToken', {
|
||||
token,
|
||||
id,
|
||||
version,
|
||||
})
|
||||
|
||||
if (!downloadToken) {
|
||||
throw new Error('cannot get download token')
|
||||
}
|
||||
|
||||
const response = await hrp(HTTP_URL, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${downloadToken}`,
|
||||
},
|
||||
})
|
||||
|
||||
// currently needed for XenApi#putResource()
|
||||
response.length = response.headers['content-length']
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new XoServerCloud(opts)
|
||||
@@ -31,7 +31,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/cron": "^1.0.5",
|
||||
"lodash": "^4.16.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/cron": "^1.0.5",
|
||||
"d3-time-format": "^2.1.1",
|
||||
"json5": "^2.0.1",
|
||||
"lodash": "^4.17.4"
|
||||
@@ -32,7 +32,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import JSON5 from 'json5'
|
||||
import { createSchedule } from '@xen-orchestra/cron'
|
||||
import { assign, forOwn, map, mean } from 'lodash'
|
||||
import { forOwn, map, mean } from 'lodash'
|
||||
import { utcParse } from 'd3-time-format'
|
||||
|
||||
const COMPARATOR_FN = {
|
||||
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
|
||||
result.rrd = await this.getRrd(result.object, observationPeriod)
|
||||
if (result.rrd !== null) {
|
||||
const data = parseData(result.rrd, result.object.uuid)
|
||||
assign(result, {
|
||||
Object.assign(result, {
|
||||
data,
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
|
||||
definition.alarmTriggerLevel
|
||||
)
|
||||
const data = getter(result.object)
|
||||
assign(result, {
|
||||
Object.assign(result, {
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
})
|
||||
@@ -680,7 +680,7 @@ ${entry.listItem}
|
||||
},
|
||||
}
|
||||
if (xapiObject.$type === 'VM') {
|
||||
payload['vm_uuid'] = xapiObject.uuid
|
||||
payload.vm_uuid = xapiObject.uuid
|
||||
}
|
||||
// JSON is not well formed, can't use the default node parser
|
||||
return JSON5.parse(
|
||||
|
||||
@@ -25,13 +25,13 @@
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.4.4",
|
||||
"cross-env": "^5.2.0"
|
||||
"cross-env": "^6.0.3"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"lodash": "^4.17.11",
|
||||
"node-openssl-cert": "^0.0.97",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"node-openssl-cert": "^0.0.98",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"uuid": "^3.3.2"
|
||||
},
|
||||
"private": true
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,202 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'

// =============================================================================

const log = createLogger('xo:xo-server:sdn-controller:private-network')

// =============================================================================

const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
  Array.from({ length: 16 }, _ => sample(CHARS)).join('')

// =============================================================================

export class PrivateNetwork {
  constructor(controller, uuid) {
    this.controller = controller
    this.uuid = uuid
    this.networks = {}
  }

  // ---------------------------------------------------------------------------

  async addHost(host) {
    if (host.$ref === this.center?.$ref) {
      // Nothing to do
      return
    }

    const hostClient = this.controller.ovsdbClients[host.$ref]
    if (hostClient === undefined) {
      log.error('No OVSDB client found', {
        host: host.name_label,
        pool: host.$pool.name_label,
      })
      return
    }

    const centerClient = this.controller.ovsdbClients[this.center.$ref]
    if (centerClient === undefined) {
      log.error('No OVSDB client found for star-center', {
        privateNetwork: this.uuid,
        host: this.center.name_label,
        pool: this.center.$pool.name_label,
      })
      return
    }

    const network = this.networks[host.$pool.uuid]
    const centerNetwork = this.networks[this.center.$pool.uuid]
    const otherConfig = network.other_config
    const encapsulation =
      otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
    const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
    const password =
      otherConfig['xo:sdn-controller:encrypted'] === 'true'
        ? createPassword()
        : undefined

    let bridgeName
    try {
      ;[bridgeName] = await Promise.all([
        hostClient.addInterfaceAndPort(
          network,
          centerClient.host.address,
          encapsulation,
          vni,
          password,
          this.uuid
        ),
        centerClient.addInterfaceAndPort(
          centerNetwork,
          hostClient.host.address,
          encapsulation,
          vni,
          password,
          this.uuid
        ),
      ])
    } catch (error) {
      log.error('Error while connecting host to private network', {
        error,
        privateNetwork: this.uuid,
        network: network.name_label,
        host: host.name_label,
        pool: host.$pool.name_label,
      })
      return
    }
    log.info('Host added', {
      privateNetwork: this.uuid,
      network: network.name_label,
      host: host.name_label,
      pool: host.$pool.name_label,
    })

    return bridgeName
  }

  addNetwork(network) {
    this.networks[network.$pool.uuid] = network
    log.info('Adding network', {
      privateNetwork: this.uuid,
      network: network.name_label,
      pool: network.$pool.name_label,
    })
    if (this.center === undefined) {
      return this.electNewCenter()
    }

    const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
    return Promise.all(
      map(hosts, async host => {
        const hostClient = this.controller.ovsdbClients[host.$ref]
        const network = this.networks[host.$pool.uuid]
        await hostClient.resetForNetwork(network, this.uuid)
        await this.addHost(host)
      })
    )
  }

  async electNewCenter() {
    delete this.center

    // TODO: make it random
    const hosts = this._getHosts()
    for (const host of hosts) {
      const pif = find(host.$PIFs, {
        network: this.networks[host.$pool.uuid].$ref,
      })
      if (pif?.currently_attached && host.$metrics.live) {
        this.center = host
        break
      }
    }

    if (this.center === undefined) {
      log.error('No available host to elect new star-center', {
        privateNetwork: this.uuid,
      })
      return
    }

    await this._reset()

    // Recreate star topology
    await Promise.all(map(hosts, host => this.addHost(host)))

    log.info('New star-center elected', {
      center: this.center.name_label,
      privateNetwork: this.uuid,
    })
  }

  // ---------------------------------------------------------------------------

  getPools() {
    const pools = []
    forOwn(this.networks, network => {
      pools.push(network.$pool)
    })
    return pools
  }

  // ---------------------------------------------------------------------------

  _reset() {
    return Promise.all(
      map(this._getHosts(), async host => {
        // Clean old ports and interfaces
        const hostClient = this.controller.ovsdbClients[host.$ref]
        if (hostClient === undefined) {
          return
        }

        const network = this.networks[host.$pool.uuid]
        try {
          await hostClient.resetForNetwork(network, this.uuid)
        } catch (error) {
          log.error('Error while resetting private network', {
            error,
            privateNetwork: this.uuid,
            network: network.name_label,
            host: host.name_label,
            pool: network.$pool.name_label,
          })
        }
      })
    )
  }

  // ---------------------------------------------------------------------------

  _getHosts() {
    const hosts = []
    forOwn(this.networks, network => {
      hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
    })
    return hosts
  }
}
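The class above wires each host of a private network to an elected star-center. As a reading aid only, here is a minimal usage sketch, not part of the commit: `controller` stands for the sdn-controller plugin instance, assumed only to expose the `ovsdbClients` map used above, and `networks` for per-pool XAPI network records.

```js
// Hypothetical wiring of the class above; `controller` and `networks`
// are placeholders for the real sdn-controller plugin objects.
import { PrivateNetwork } from './private-network'

async function createPrivateNetwork(controller, uuid, networks) {
  const privateNetwork = new PrivateNetwork(controller, uuid)

  // Register one network per pool; the first one triggers the election
  // of a star-center, the following ones reconnect every host to it.
  for (const network of networks) {
    await privateNetwork.addNetwork(network)
  }

  return privateNetwork
}
```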
@@ -28,8 +28,7 @@ export class OvsdbClient

  Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
  - `other_config`:
    - `xo:sdn-controller:cross-pool` : UUID of the remote network connected by the tunnel
    - `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller
    - `xo:sdn-controller:private-network-uuid`: UUID of the private network

  Attributes on created OVS interfaces:
  - `options`:
@@ -67,55 +66,49 @@ export class OvsdbClient
  // ---------------------------------------------------------------------------

  async addInterfaceAndPort(
    networkUuid,
    networkName,
    network,
    remoteAddress,
    encapsulation,
    key,
    password,
    remoteNetwork
    privateNetworkUuid
  ) {
    if (
      this._adding.find(
        elem => elem.id === networkUuid && elem.addr === remoteAddress
        elem => elem.id === network.uuid && elem.addr === remoteAddress
      ) !== undefined
    ) {
      return
    }
    const adding = { id: networkUuid, addr: remoteAddress }
    const adding = { id: network.uuid, addr: remoteAddress }
    this._adding.push(adding)

    const socket = await this._connect()
    const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
      networkUuid,
      networkName,
      socket
    )
    if (bridgeUuid === undefined) {
    const bridge = await this._getBridgeForNetwork(network, socket)
    if (bridge.uuid === undefined) {
      socket.destroy()
      this._adding = this._adding.filter(
        elem => elem.id !== networkUuid || elem.addr !== remoteAddress
        elem => elem.id !== network.uuid || elem.addr !== remoteAddress
      )
      return
    }

    const alreadyExist = await this._interfaceAndPortAlreadyExist(
      bridgeUuid,
      bridgeName,
      bridge,
      remoteAddress,
      socket
    )
    if (alreadyExist) {
      socket.destroy()
      this._adding = this._adding.filter(
        elem => elem.id !== networkUuid || elem.addr !== remoteAddress
        elem => elem.id !== network.uuid || elem.addr !== remoteAddress
      )
      return bridgeName
      return bridge.name
    }

    const index = ++this._numberOfPortAndInterface
    const interfaceName = bridgeName + '_iface' + index
    const portName = bridgeName + '_port' + index
    const interfaceName = bridge.name + '_iface' + index
    const portName = bridge.name + '_port' + index

    // Add interface and port to the bridge
    const options = { remote_ip: remoteAddress, key: key }
@@ -139,11 +132,9 @@ export class OvsdbClient {
      row: {
        name: portName,
        interfaces: ['set', [['named-uuid', 'new_iface']]],
        other_config: toMap(
          remoteNetwork !== undefined
            ? { 'xo:sdn-controller:cross-pool': remoteNetwork }
            : { 'xo:sdn-controller:private-pool-wide': 'true' }
        ),
        other_config: toMap({
          'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
        }),
      },
      'uuid-name': 'new_port',
    }
@@ -151,7 +142,7 @@ export class OvsdbClient {
    const mutateBridgeOperation = {
      op: 'mutate',
      table: 'Bridge',
      where: [['_uuid', '==', ['uuid', bridgeUuid]]],
      where: [['_uuid', '==', ['uuid', bridge.uuid]]],
      mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
    }
    const params = [
@@ -163,7 +154,7 @@ export class OvsdbClient {
    const jsonObjects = await this._sendOvsdbTransaction(params, socket)

    this._adding = this._adding.filter(
      elem => elem.id !== networkUuid || elem.addr !== remoteAddress
      elem => elem.id !== network.uuid || elem.addr !== remoteAddress
    )
    if (jsonObjects === undefined) {
      socket.destroy()
@@ -189,8 +180,8 @@ export class OvsdbClient {
        details,
        port: portName,
        interface: interfaceName,
        bridge: bridgeName,
        network: networkName,
        bridge: bridge.name,
        network: network.name_label,
        host: this.host.name_label,
      })
      socket.destroy()
@@ -200,33 +191,24 @@ export class OvsdbClient {
    log.debug('Port and interface added to bridge', {
      port: portName,
      interface: interfaceName,
      bridge: bridgeName,
      network: networkName,
      bridge: bridge.name,
      network: network.name_label,
      host: this.host.name_label,
    })
    socket.destroy()
    return bridgeName
    return bridge.name
  }

  async resetForNetwork(
    networkUuid,
    networkName,
    crossPoolOnly,
    remoteNetwork
  ) {
  async resetForNetwork(network, privateNetworkUuid) {
    const socket = await this._connect()
    const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
      networkUuid,
      networkName,
      socket
    )
    if (bridgeUuid === undefined) {
    const bridge = await this._getBridgeForNetwork(network, socket)
    if (bridge.uuid === undefined) {
      socket.destroy()
      return
    }

    // Delete old ports created by a SDN controller
    const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
    const ports = await this._getBridgePorts(bridge, socket)
    if (ports === undefined) {
      socket.destroy()
      return
@@ -250,15 +232,14 @@ export class OvsdbClient {
      // 2019-09-03
      // Compatibility code, to be removed in 1 year.
      const oldShouldDelete =
        (config[0] === 'private_pool_wide' && !crossPoolOnly) ||
        (config[0] === 'cross_pool' &&
          (remoteNetwork === undefined || remoteNetwork === config[1]))
        config[0] === 'private_pool_wide' ||
        config[0] === 'cross_pool' ||
        config[0] === 'xo:sdn-controller:private-pool-wide' ||
        config[0] === 'xo:sdn-controller:cross-pool'

      const shouldDelete =
        (config[0] === 'xo:sdn-controller:private-pool-wide' &&
          !crossPoolOnly) ||
        (config[0] === 'xo:sdn-controller:cross-pool' &&
          (remoteNetwork === undefined || remoteNetwork === config[1]))
        config[0] === 'xo:sdn-controller:private-network-uuid' &&
        config[1] === privateNetworkUuid

      if (shouldDelete || oldShouldDelete) {
        portsToDelete.push(['uuid', portUuid])
@@ -275,7 +256,7 @@ export class OvsdbClient {
    const mutateBridgeOperation = {
      op: 'mutate',
      table: 'Bridge',
      where: [['_uuid', '==', ['uuid', bridgeUuid]]],
      where: [['_uuid', '==', ['uuid', bridge.uuid]]],
      mutations: [['ports', 'delete', ['set', portsToDelete]]],
    }

@@ -288,7 +269,7 @@ export class OvsdbClient {
    if (jsonObjects[0].error != null) {
      log.error('Error while deleting ports from bridge', {
        error: jsonObjects[0].error,
        bridge: bridgeName,
        bridge: bridge.name,
        host: this.host.name_label,
      })
      socket.destroy()
@@ -297,7 +278,7 @@ export class OvsdbClient {

    log.debug('Ports deleted from bridge', {
      nPorts: jsonObjects[0].result[0].count,
      bridge: bridgeName,
      bridge: bridge.name,
      host: this.host.name_label,
    })
    socket.destroy()
@@ -335,9 +316,9 @@ export class OvsdbClient {

  // ---------------------------------------------------------------------------

  async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
  async _getBridgeForNetwork(network, socket) {
    const where = [
      ['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
      ['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
    ]
    const selectResult = await this._select(
      'Bridge',
@@ -347,25 +328,17 @@ export class OvsdbClient {
    )
    if (selectResult === undefined) {
      log.error('No bridge found for network', {
        network: networkName,
        network: network.name_label,
        host: this.host.name_label,
      })
      return []
      return {}
    }

    const bridgeUuid = selectResult._uuid[1]
    const bridgeName = selectResult.name

    return [bridgeUuid, bridgeName]
    return { uuid: selectResult._uuid[1], name: selectResult.name }
  }

  async _interfaceAndPortAlreadyExist(
    bridgeUuid,
    bridgeName,
    remoteAddress,
    socket
  ) {
    const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
  async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
    const ports = await this._getBridgePorts(bridge, socket)
    if (ports === undefined) {
      return false
    }
@@ -393,8 +366,8 @@ export class OvsdbClient {
    return false
  }

  async _getBridgePorts(bridgeUuid, bridgeName, socket) {
    const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
  async _getBridgePorts(bridge, socket) {
    const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
    const selectResult = await this._select('Bridge', ['ports'], where, socket)
    if (selectResult === undefined) {
      return
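For reference, a hedged sketch of a call site after this refactoring, assuming `client` is an `OvsdbClient` bound to a host and `network` is the XAPI network record passed around above; the wrapper function itself is hypothetical.

```js
// Hypothetical caller: the refactored methods take the whole XAPI
// `network` record plus the private network UUID, instead of the old
// (networkUuid, networkName, …) parameter lists.
async function joinTunnel(client, network, remoteAddress, privateNetworkUuid) {
  // Remove ports previously created for this private network…
  await client.resetForNetwork(network, privateNetworkUuid)

  // …then create a fresh tunnel port towards the star-center.
  return client.addInterfaceAndPort(
    network,
    remoteAddress,
    'gre', // encapsulation
    '0', // key (VNI)
    undefined, // password: undefined means no encryption
    privateNetworkUuid
  )
}
```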
@@ -36,7 +36,7 @@
    "golike-defer": "^0.4.1",
    "jest": "^24.8.0",
    "lodash": "^4.17.11",
    "promise-toolbox": "^0.13.0",
    "promise-toolbox": "^0.14.0",
    "xo-collection": "^0.4.1",
    "xo-common": "^0.2.0",
    "xo-lib": "^0.9.0"
@@ -14,6 +14,7 @@

[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''

[templates]
6
packages/xo-server-test/src/_defaultValues.js
Normal file
@@ -0,0 +1,6 @@
export const getDefaultName = () => `xo-server-test ${new Date().toISOString()}`

export const getDefaultSchedule = () => ({
  name: getDefaultName(),
  cron: '0 * * * * *',
})
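For illustration only, assuming the helpers are imported as below, they produce values of this shape (the timestamp obviously varies):

```js
import { getDefaultName, getDefaultSchedule } from './_defaultValues'

// Example output shapes:
//   getDefaultName()     → 'xo-server-test 2019-11-08T10:00:00.000Z'
//   getDefaultSchedule() → { name: 'xo-server-test <ISO date>', cron: '0 * * * * *' }
console.log(getDefaultName(), getDefaultSchedule())
```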
@@ -2,15 +2,11 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { find, forOwn } from 'lodash'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { fromEvent } from 'promise-toolbox'

import config from './_config'

const getDefaultCredentials = () => {
  const { email, password } = config.xoConnection
  return { email, password }
}
import { getDefaultName } from './_defaultValues'

class XoConnection extends Xo {
  constructor(opts) {
@@ -72,7 +68,10 @@ class XoConnection extends Xo {
  }

  @defer
  async connect($defer, credentials = getDefaultCredentials()) {
  async connect(
    $defer,
    credentials = pick(config.xoConnection, 'email', 'password')
  ) {
    await this.open()
    $defer.onFailure(() => this.close())

@@ -111,9 +110,26 @@ class XoConnection extends Xo {
  }

  async createTempBackupNgJob(params) {
    const job = await this.call('backupNg.createJob', params)
    this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
    return job
    // mutate and inject default values
    defaultsDeep(params, {
      mode: 'full',
      name: getDefaultName(),
      settings: {
        '': {
          // it must be enabled because the XAPI might be not able to coalesce VDIs
          // as fast as the tests run
          //
          // see https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
          bypassVdiChainsCheck: true,

          // it must be 'never' to avoid race conditions with the plugin `backup-reports`
          reportWhen: 'never',
        },
      },
    })
    const id = await this.call('backupNg.createJob', params)
    this._tempResourceDisposers.push('backupNg.deleteJob', { id })
    return this.call('backupNg.getJob', { id })
  }

  async createTempNetwork(params) {
@@ -128,7 +144,7 @@ class XoConnection extends Xo {

  async createTempVm(params) {
    const id = await this.call('vm.create', {
      name_label: 'XO Test',
      name_label: getDefaultName(),
      template: config.templates.templateWithoutDisks,
      ...params,
    })
@@ -138,6 +154,19 @@ class XoConnection extends Xo {
    })
  }

  async startTempVm(id, params, withXenTools = false) {
    await this.call('vm.start', { id, ...params })
    this._tempResourceDisposers.push('vm.stop', { id, force: true })
    return this.waitObjectState(id, vm => {
      if (
        vm.power_state !== 'Running' ||
        (withXenTools && vm.xenTools === false)
      ) {
        throw new Error('retry')
      }
    })
  }

  async createTempRemote(params) {
    const remote = await this.call('remote.create', params)
    this._tempResourceDisposers.push('remote.delete', { id: remote.id })
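A short sketch of how the reworked helper behaves from a test's point of view; `xo` is the connection exported by this file and the VM id is a placeholder.

```js
// Hypothetical call: only `vms` is provided, defaultsDeep() fills in
// mode, name and the '' settings before backupNg.createJob is called,
// and the created job is fetched back with backupNg.getJob.
const job = await xo.createTempBackupNgJob({
  vms: { id: 'vm-uuid-placeholder' },
})
// job.mode === 'full'
// job.settings[''].reportWhen === 'never'
// job.settings[''].bypassVdiChainsCheck === true
```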
@@ -1,61 +1,6 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Any<Object>,
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
|
||||
Object {
|
||||
"cron": "0 * * * * *",
|
||||
"enabled": false,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"name": "scheduleTest",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
|
||||
Object {
|
||||
"id": Any<String>,
|
||||
"mode": "full",
|
||||
"name": "default-backupNg",
|
||||
"settings": Object {
|
||||
"": Object {
|
||||
"reportWhen": "never",
|
||||
},
|
||||
},
|
||||
"type": "backup",
|
||||
"userId": Any<String>,
|
||||
"vms": Any<Object>,
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -92,23 +37,6 @@ Array [
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -127,23 +55,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
|
||||
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -157,7 +69,69 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
|
||||
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
|
||||
Object {
|
||||
"data": Any<Object>,
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
|
||||
Object {
|
||||
"data": Any<Object>,
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
@@ -168,7 +142,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -183,6 +157,19 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
@@ -197,19 +184,6 @@ Object {
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -224,6 +198,19 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
@@ -238,35 +225,6 @@ Object {
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -280,7 +238,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
@@ -291,7 +249,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -306,7 +264,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
@@ -319,6 +277,34 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
@@ -334,62 +320,18 @@ Object {
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "delta",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
@@ -403,7 +345,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
@@ -414,6 +356,47 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
@@ -455,65 +438,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 25`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 26`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 27`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"mode": "full",
|
||||
"reportWhen": "never",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"jobId": Any<String>,
|
||||
"jobName": "default-backupNg",
|
||||
"message": "backup",
|
||||
"scheduleId": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
@@ -524,7 +449,7 @@ Object {
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
|
||||
@@ -6,20 +6,44 @@ import { noSuchObject } from 'xo-common/api-errors'
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo from '../_xoConnection'
|
||||
import { getDefaultName, getDefaultSchedule } from '../_defaultValues'
|
||||
|
||||
const DEFAULT_SCHEDULE = {
|
||||
name: 'scheduleTest',
|
||||
cron: '0 * * * * *',
|
||||
const validateBackupJob = (jobInput, jobOutput, createdSchedule) => {
|
||||
const expectedObj = {
|
||||
id: expect.any(String),
|
||||
mode: jobInput.mode,
|
||||
name: jobInput.name,
|
||||
type: 'backup',
|
||||
settings: {
|
||||
'': jobInput.settings[''],
|
||||
},
|
||||
userId: xo._user.id,
|
||||
vms: jobInput.vms,
|
||||
}
|
||||
|
||||
const schedules = jobInput.schedules
|
||||
if (schedules !== undefined) {
|
||||
const scheduleTmpId = Object.keys(schedules)[0]
|
||||
expect(createdSchedule).toEqual({
|
||||
...schedules[scheduleTmpId],
|
||||
enabled: false,
|
||||
id: expect.any(String),
|
||||
jobId: jobOutput.id,
|
||||
})
|
||||
|
||||
expectedObj.settings[createdSchedule.id] = jobInput.settings[scheduleTmpId]
|
||||
}
|
||||
|
||||
expect(jobOutput).toEqual(expectedObj)
|
||||
}
|
||||
|
||||
const validateRootTask = (log, props) =>
|
||||
expect(log).toMatchSnapshot({
|
||||
const validateRootTask = (log, expected) =>
|
||||
expect(log).toEqual({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
message: 'backup',
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
...expected,
|
||||
})
|
||||
|
||||
const validateVmTask = (task, vmId, props) => {
|
||||
@@ -66,88 +90,55 @@ const validateOperationTask = (task, props) => {
|
||||
})
|
||||
}
|
||||
|
||||
// Note: `bypassVdiChainsCheck` must be enabled because the XAPI might be not
|
||||
// able to coalesce VDIs as fast as the tests run.
|
||||
//
|
||||
// See https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
|
||||
describe('backupNg', () => {
|
||||
let defaultBackupNg
|
||||
|
||||
beforeAll(() => {
|
||||
defaultBackupNg = {
|
||||
name: 'default-backupNg',
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
reportWhen: 'never',
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.createJob() :', () => {
|
||||
it('creates a new backup job without schedules', async () => {
|
||||
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
expect(backupNg).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNg.userId).toBe(xo._user.id)
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(jobInput, jobOutput)
|
||||
})
|
||||
|
||||
it('creates a new backup job with schedules', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
|
||||
|
||||
expect(backupNgJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
settings: expect.any(Object),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNgJob.userId).toBe(xo._user.id)
|
||||
|
||||
expect(Object.keys(backupNgJob.settings).length).toBe(2)
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
expect(backupNgJob.settings[schedule.id]).toEqual({
|
||||
snapshotRetention: 1,
|
||||
})
|
||||
|
||||
expect(schedule).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
})
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(
|
||||
jobInput,
|
||||
jobOutput,
|
||||
await xo.getSchedule({ jobId: jobOutput.id })
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a backup job', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.call('backupNg.createJob', {
|
||||
...defaultBackupNg,
|
||||
const jobId = await xo.call('backupNg.createJob', {
|
||||
mode: 'full',
|
||||
name: getDefaultName(),
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
@@ -173,16 +164,19 @@ describe('backupNg', () => {
|
||||
|
||||
describe('.runJob() :', () => {
|
||||
it('fails trying to run a backup job without schedule', async () => {
|
||||
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
const { id } = await xo.createTempBackupNgJob({
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
})
|
||||
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with no matching VMs', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
@@ -205,9 +199,8 @@ describe('backupNg', () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
@@ -231,25 +224,23 @@ describe('backupNg', () => {
|
||||
jest.setTimeout(8e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: vmIdWithoutDisks } = await xo.createTempVm({
|
||||
name_label: 'XO Test Without Disks',
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.templateWithoutDisks,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: vmIdWithoutDisks,
|
||||
},
|
||||
})
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -264,12 +255,16 @@ describe('backupNg', () => {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'skipped',
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
@@ -293,22 +288,24 @@ describe('backupNg', () => {
|
||||
const scheduleTempId = randomId()
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: {},
|
||||
},
|
||||
srs: {
|
||||
id: config.srs.default,
|
||||
},
|
||||
})
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -324,12 +321,15 @@ describe('backupNg', () => {
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'failure',
|
||||
})
|
||||
|
||||
expect(task).toMatchSnapshot({
|
||||
@@ -352,7 +352,6 @@ describe('backupNg', () => {
|
||||
jest.setTimeout(6e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
let vm = await xo.createTempVm({
|
||||
name_label: 'XO Test Temp',
|
||||
name_description: 'Creating a temporary vm',
|
||||
template: config.templates.default,
|
||||
VDIs: [
|
||||
@@ -365,22 +364,18 @@ describe('backupNg', () => {
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
vms: {
|
||||
id: vm.id,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
bypassVdiChainsCheck: true,
|
||||
reportWhen: 'never',
|
||||
},
|
||||
[scheduleTempId]: { snapshotRetention: 2 },
|
||||
},
|
||||
})
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -420,12 +415,15 @@ describe('backupNg', () => {
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const subTaskSnapshot = subTasks.find(
|
||||
@@ -470,7 +468,7 @@ describe('backupNg', () => {
|
||||
const exportRetention = 2
|
||||
const fullInterval = 2
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
const jobInput = {
|
||||
mode: 'delta',
|
||||
remotes: {
|
||||
id: {
|
||||
@@ -478,13 +476,11 @@ describe('backupNg', () => {
|
||||
},
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
bypassVdiChainsCheck: true,
|
||||
fullInterval,
|
||||
reportWhen: 'never',
|
||||
},
|
||||
[remoteId1]: { deleteFirst: true },
|
||||
[scheduleTempId]: { exportRetention },
|
||||
@@ -492,7 +488,8 @@ describe('backupNg', () => {
|
||||
vms: {
|
||||
id: vmToBackup,
|
||||
},
|
||||
})
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -515,10 +512,12 @@ describe('backupNg', () => {
|
||||
backupLogs.forEach(({ tasks = [], ...log }, key) => {
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: 'delta',
|
||||
reportWhen: 'never',
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
message: 'backup',
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
@@ -585,4 +584,110 @@ describe('backupNg', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
test('create and execute backup with enabled offline backup', async () => {
|
||||
const vm = xo.objects.all[config.vms.withOsAndXenTools]
|
||||
if (vm.power_state !== 'Running') {
|
||||
await xo.startTempVm(vm.id, { force: true }, true)
|
||||
}
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const srId = config.srs.default
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const backupInput = {
|
||||
mode: 'full',
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
offlineBackup: true,
|
||||
},
|
||||
[scheduleTempId]: {
|
||||
copyRetention: 1,
|
||||
exportRetention: 1,
|
||||
},
|
||||
},
|
||||
srs: {
|
||||
id: srId,
|
||||
},
|
||||
vms: {
|
||||
id: vm.id,
|
||||
},
|
||||
}
|
||||
const backup = await xo.createTempBackupNgJob(backupInput)
|
||||
expect(backup.settings[''].offlineBackup).toBe(true)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId: backup.id })
|
||||
|
||||
await Promise.all([
|
||||
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
|
||||
xo.waitObjectState(vm.id, vm => {
|
||||
if (vm.power_state !== 'Halted') {
|
||||
throw new Error('retry')
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.waitObjectState(vm.id, vm => {
|
||||
if (vm.power_state !== 'Running') {
|
||||
throw new Error('retry')
|
||||
}
|
||||
})
|
||||
|
||||
const backupLogs = await xo.getBackupLogs({
|
||||
jobId: backup.id,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(backupLogs.length).toBe(1)
|
||||
|
||||
const { tasks, ...log } = backupLogs[0]
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: backupInput.mode,
|
||||
reportWhen: backupInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId: backup.id,
|
||||
jobName: backupInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(({ tasks, ...vmTask }) => {
|
||||
validateVmTask(vmTask, vm.id, { status: 'success' })
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(({ tasks, ...subTask }) => {
|
||||
expect(subTask.message).not.toBe('snapshot')
|
||||
|
||||
if (subTask.message === 'export') {
|
||||
validateExportTask(
|
||||
subTask,
|
||||
subTask.data.type === 'remote' ? remoteId : srId,
|
||||
{
|
||||
data: expect.any(Object),
|
||||
status: 'success',
|
||||
}
|
||||
)
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(operationTask => {
|
||||
if (
|
||||
operationTask.message === 'transfer' ||
|
||||
operationTask.message === 'merge'
|
||||
) {
|
||||
validateOperationTask(operationTask, {
|
||||
result: { size: expect.any(Number) },
|
||||
status: 'success',
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}, 200e3)
|
||||
})
|
||||
|
||||
@@ -6,7 +6,7 @@ import expect from 'must'
// ===================================================================

import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map, assign } from 'lodash'
import { map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================
@@ -27,7 +27,7 @@ describe('disk', () => {
    const config = await getConfig()
    serverId = await xo.call(
      'server.add',
      assign({ autoConnect: false }, config.xenServer1)
      Object.assign({ autoConnect: false }, config.xenServer1)
    )
    await xo.call('server.connect', { id: serverId })
    await eventToPromise(xo.objects, 'finish')

@@ -1,6 +1,6 @@
/* eslint-env jest */

import { assign, find, map } from 'lodash'
import { find, map } from 'lodash'

import { config, rejectionOf, xo } from './util'

@@ -151,7 +151,7 @@ describe('server', () => {

  it('connects to a Xen server', async () => {
    const serverId = await addServer(
      assign({ autoConnect: false }, config.xenServer1)
      Object.assign({ autoConnect: false }, config.xenServer1)
    )

    await xo.call('server.connect', {
@@ -184,7 +184,7 @@ describe('server', () => {
    let serverId
    beforeEach(async () => {
      serverId = await addServer(
        assign({ autoConnect: false }, config.xenServer1)
        Object.assign({ autoConnect: false }, config.xenServer1)
      )
      await xo.call('server.connect', {
        id: serverId,

@@ -12,7 +12,7 @@ import {
  getOneHost,
  waitObjectState,
} from './util'
import { assign, map } from 'lodash'
import { map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================
@@ -33,7 +33,7 @@ describe('vbd', () => {

    serverId = await xo.call(
      'server.add',
      assign({ autoConnect: false }, config.xenServer1)
      Object.assign({ autoConnect: false }, config.xenServer1)
    )
    await xo.call('server.connect', { id: serverId })
    await eventToPromise(xo.objects, 'finish')
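These hunks only swap lodash's `assign` for the built-in; for the call sites touched here the two are interchangeable, e.g.:

```js
// Both forms produce { autoConnect: false, ...config.xenServer1 };
// using the built-in avoids importing `assign` from lodash.
const params = Object.assign({ autoConnect: false }, config.xenServer1)
```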
@@ -34,14 +34,14 @@
  "dependencies": {
    "nodemailer": "^6.1.0",
    "nodemailer-markdown": "^1.0.1",
    "promise-toolbox": "^0.13.0"
    "promise-toolbox": "^0.14.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "cross-env": "^6.0.3",
    "rimraf": "^3.0.0"
  },
  "scripts": {
@@ -1,4 +1,6 @@
# xo-server-cloud [](https://travis-ci.org/vatesfr/xen-orchestra)
# xo-server-transport-icinga2 [](https://travis-ci.org/vatesfr/xen-orchestra)

> xo-server plugin to send status to icinga2 server

## Install

@@ -11,6 +13,13 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs

## Development

### `Xo#sendIcinga2Status({ status, message })`

This xo method sends a passive check to icinga2 and changes the status of a service.
It takes two parameters:
- `status`: the service status in icinga2 (0: OK | 1: WARNING | 2: CRITICAL | 3: UNKNOWN).
- `message`: the status information in icinga2.

```
# Install dependencies
> npm install
32
packages/xo-server-transport-icinga2/package.json
Normal file
@@ -0,0 +1,32 @@
{
  "name": "xo-server-transport-icinga2",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-transport-icinga2",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "packages/xo-server-transport-icinga2",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "main": "./dist",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "version": "0.1.0",
  "engines": {
    "node": ">=8.9.4"
  },
  "devDependencies": {
    "@babel/cli": "^7.4.4",
    "@babel/core": "^7.4.4",
    "@babel/preset-env": "^7.4.4",
    "cross-env": "^6.0.3"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.2.0"
  },
  "private": true
}
136
packages/xo-server-transport-icinga2/src/index.js
Normal file
@@ -0,0 +1,136 @@
import assert from 'assert'
import { URL } from 'url'

// =============================================================================

export const configurationSchema = {
  type: 'object',

  properties: {
    server: {
      type: 'string',
      description: `
The icinga2 server http/https address.

*If no port is provided in the URL, 5665 will be used.*

Examples:
- https://icinga2.example.com
- http://192.168.0.1:1234
`.trim(),
    },
    user: {
      type: 'string',
      description: 'The icinga2 server username',
    },
    password: {
      type: 'string',
      description: 'The icinga2 server password',
    },
    filter: {
      type: 'string',
      description: `
The filter to use

See: https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/#filters

Example:
- Monitor the backup jobs of the VMs of a specific host:

\`host.name=="xoa.example.com" && service.name=="xo-backup"\`
`.trim(),
    },
    acceptUnauthorized: {
      type: 'boolean',
      description: 'Accept unauthorized certificates',
      default: false,
    },
  },
  additionalProperties: false,
  required: ['server'],
}

// =============================================================================

const STATUS_MAP = {
  OK: 0,
  WARNING: 1,
  CRITICAL: 2,
  UNKNOWN: 3,
}

// =============================================================================

class XoServerIcinga2 {
  constructor({ xo }) {
    this._xo = xo
  }

  // ---------------------------------------------------------------------------

  configure(configuration) {
    const serverUrl = new URL(configuration.server)
    if (configuration.user !== '') {
      serverUrl.username = configuration.user
    }
    if (configuration.password !== '') {
      serverUrl.password = configuration.password
    }
    if (serverUrl.port === '') {
      serverUrl.port = '5665' // Default icinga2 access port
    }
    serverUrl.pathname = '/v1/actions/process-check-result'
    this._url = serverUrl.href

    this._filter =
      configuration.filter !== undefined ? configuration.filter : ''
    this._acceptUnauthorized = configuration.acceptUnauthorized
  }

  load() {
    this._unset = this._xo.defineProperty(
      'sendIcinga2Status',
      this._sendIcinga2Status,
      this
    )
  }

  unload() {
    this._unset()
  }

  test() {
    return this._sendIcinga2Status({
      message:
        'The server-icinga2 plugin for Xen Orchestra server seems to be working fine, nicely done :)',
      status: 'OK',
    })
  }

  // ---------------------------------------------------------------------------

  _sendIcinga2Status({ message, status }) {
    const icinga2Status = STATUS_MAP[status]
    assert(icinga2Status !== undefined, `Invalid icinga2 status: ${status}`)
    return this._xo
      .httpRequest(this._url, {
        method: 'POST',
        headers: {
          Accept: 'application/json',
          'Content-Type': 'application/json',
        },
        rejectUnauthorized: !this._acceptUnauthorized,
        body: JSON.stringify({
          type: 'Service',
          filter: this._filter,
          plugin_output: message,
          exit_status: icinga2Status,
        }),
      })
      .readAll()
  }
}

// =============================================================================

export default opts => new XoServerIcinga2(opts)
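For context, a minimal sketch of how this new transport could be exercised once installed: the configuration object mirrors configurationSchema above, and sendIcinga2Status is the method the plugin registers on the xo instance via defineProperty. The concrete values (server address, credentials, filter) are made-up examples, not part of the changeset.

// Hypothetical plugin configuration, matching configurationSchema above.
const configuration = {
  server: 'https://icinga2.example.com', // port defaults to 5665 when omitted
  user: 'xo',
  password: 'secret',
  filter: 'host.name=="xoa.example.com" && service.name=="xo-backup"',
  acceptUnauthorized: false,
}

// Once the plugin is configured and loaded, other parts of xo-server can
// report a check result; status must be one of the STATUS_MAP keys.
await xo.sendIcinga2Status({
  message: 'backup job finished',
  status: 'OK',
})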
@@ -39,7 +39,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {

@@ -33,14 +33,14 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.14.0",
"slack-node": "^0.1.8"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {

@@ -40,7 +40,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {

@@ -36,20 +36,20 @@
},
"dependencies": {
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/cron": "^1.0.5",
"@xen-orchestra/log": "^0.2.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.14.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {

@@ -5,7 +5,6 @@ import humanFormat from 'human-format'
import { createSchedule } from '@xen-orchestra/cron'
import { minify } from 'html-minifier'
import {
assign,
concat,
differenceBy,
filter,
@@ -418,7 +417,7 @@ function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
}))
)

return assign(
return Object.assign(
computeMeans(vmsStats, [
'cpu',
'ram',
@@ -446,7 +445,7 @@ function computeGlobalHostsStats({ haltedHosts, hostsStats, xo }) {
}))
)

return assign(
return Object.assign(
computeMeans(hostsStats, [
'cpu',
'ram',

@@ -30,12 +30,12 @@
|
||||
"bin": "bin"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
"node": ">=8"
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"@xen-orchestra/cron": "^1.0.4",
|
||||
"@xen-orchestra/cron": "^1.0.5",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
@@ -58,16 +58,15 @@
|
||||
"debug": "^4.0.1",
|
||||
"decorator-synchronized": "^0.5.0",
|
||||
"deptree": "^1.0.0",
|
||||
"escape-string-regexp": "^1.0.5",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"execa": "^1.0.0",
|
||||
"execa": "^2.0.5",
|
||||
"express": "^4.16.2",
|
||||
"express-session": "^1.15.6",
|
||||
"fatfs": "^0.10.4",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^4.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"golike-defer": "^0.4.1",
|
||||
"hashy": "^0.7.1",
|
||||
"helmet": "^3.9.0",
|
||||
@@ -91,7 +90,7 @@
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.4",
|
||||
"make-error": "^1",
|
||||
"micromatch": "^3.1.4",
|
||||
"micromatch": "^4.0.2",
|
||||
"minimist": "^1.2.0",
|
||||
"moment-timezone": "^0.5.14",
|
||||
"ms": "^2.1.1",
|
||||
@@ -103,7 +102,7 @@
|
||||
"passport": "^0.4.0",
|
||||
"passport-local": "^1.0.0",
|
||||
"pretty-format": "^24.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"proxy-agent": "^3.0.0",
|
||||
"pug": "^2.0.0-rc.4",
|
||||
"pump": "^3.0.0",
|
||||
@@ -123,7 +122,7 @@
|
||||
"uuid": "^3.0.1",
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.7.0",
|
||||
"ws": "^6.0.0",
|
||||
"ws": "^7.1.2",
|
||||
"xen-api": "^0.27.2",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.4.1",
|
||||
@@ -148,7 +147,7 @@
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"babel-plugin-transform-dev": "^2.0.1",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -8,7 +8,7 @@ import { safeDateFormat } from '../utils'

export function createJob({ schedules, ...job }) {
job.userId = this.user.id
return this.createBackupNgJob(job, schedules)
return this.createBackupNgJob(job, schedules).then(({ id }) => id)
}

createJob.permission = 'admin'

@@ -777,7 +777,7 @@ export async function probeIscsiExists({
)

const srs = []
forEach(ensureArray(xml['SRlist'].SR), sr => {
forEach(ensureArray(xml.SRlist.SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({ uuid: sr.UUID.trim() })
})
@@ -845,7 +845,7 @@ export async function probeNfsExists({ host, server, serverPath }) {

const srs = []

forEach(ensureArray(xml['SRlist'].SR), sr => {
forEach(ensureArray(xml.SRlist.SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({ uuid: sr.UUID.trim() })
})

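Both hunks above rely on ensureArray to cope with xml2js output, where an element can be parsed either as a single object or as an array depending on how many times it occurs. The helper itself is defined elsewhere in xo-server; what follows is only a sketch of the behaviour assumed here, not the actual implementation.

// Sketch only: normalise a value so callers can always iterate over it.
const ensureArray = value =>
  value === undefined ? [] : Array.isArray(value) ? value : [value]

ensureArray({ UUID: 'abc' }) // [{ UUID: 'abc' }]
ensureArray([{ UUID: 'a' }, { UUID: 'b' }]) // returned unchanged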
@@ -1,7 +1,7 @@
// FIXME: rename to disk.*

import { invalidParameters } from 'xo-common/api-errors'
import { isArray, reduce } from 'lodash'
import { reduce } from 'lodash'

import { parseSize } from '../utils'

@@ -85,7 +85,7 @@ export async function set(params) {
continue
}

for (const field of isArray(fields) ? fields : [fields]) {
for (const field of Array.isArray(fields) ? fields : [fields]) {
await xapi.call(`VDI.set_${field}`, ref, `${params[param]}`)
}
}

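This hunk is part of a broader cleanup running through the whole changeset: lodash (and local utils) type helpers are replaced by their native equivalents, which the new Node >= 8 requirement makes redundant. The substitutions used throughout the diff are:

// isArray(x)      -> Array.isArray(x)
// isString(x)     -> typeof x === 'string'
// isFunction(x)   -> typeof x === 'function'
// isBoolean(x)    -> typeof x === 'boolean'
// assign(a, b)    -> Object.assign(a, b)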
@@ -85,7 +85,7 @@ async function rateLimitedRetry(action, shouldRetry, retryCount = 20) {
|
||||
function createVolumeInfoTypes() {
|
||||
function parseHeal(parsed) {
|
||||
const bricks = []
|
||||
parsed['healInfo']['bricks']['brick'].forEach(brick => {
|
||||
parsed.healInfo.bricks.brick.forEach(brick => {
|
||||
bricks.push(brick)
|
||||
if (brick.file) {
|
||||
brick.file = ensureArray(brick.file)
|
||||
@@ -96,21 +96,21 @@ function createVolumeInfoTypes() {
|
||||
|
||||
function parseStatus(parsed) {
|
||||
const brickDictByUuid = {}
|
||||
const volume = parsed['volStatus']['volumes']['volume']
|
||||
volume['node'].forEach(node => {
|
||||
const volume = parsed.volStatus.volumes.volume
|
||||
volume.node.forEach(node => {
|
||||
brickDictByUuid[node.peerid] = brickDictByUuid[node.peerid] || []
|
||||
brickDictByUuid[node.peerid].push(node)
|
||||
})
|
||||
return {
|
||||
commandStatus: true,
|
||||
result: { nodes: brickDictByUuid, tasks: volume['tasks'] },
|
||||
result: { nodes: brickDictByUuid, tasks: volume.tasks },
|
||||
}
|
||||
}
|
||||
|
||||
async function parseInfo(parsed) {
|
||||
const volume = parsed['volInfo']['volumes']['volume']
|
||||
volume['bricks'] = volume['bricks']['brick']
|
||||
volume['options'] = volume['options']['option']
|
||||
const volume = parsed.volInfo.volumes.volume
|
||||
volume.bricks = volume.bricks.brick
|
||||
volume.options = volume.options.option
|
||||
return { commandStatus: true, result: volume }
|
||||
}
|
||||
|
||||
@@ -118,23 +118,23 @@ function createVolumeInfoTypes() {
|
||||
return async function(sr) {
|
||||
const glusterEndpoint = this::_getGlusterEndpoint(sr)
|
||||
const cmdShouldRetry = result =>
|
||||
!result['commandStatus'] &&
|
||||
((result.parsed && result.parsed['cliOutput']['opErrno'] === '30802') ||
|
||||
!result.commandStatus &&
|
||||
((result.parsed && result.parsed.cliOutput.opErrno === '30802') ||
|
||||
result.stderr.match(/Another transaction is in progress/))
|
||||
const runCmd = async () =>
|
||||
glusterCmd(glusterEndpoint, 'volume ' + command, true)
|
||||
const commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry, 30)
|
||||
return commandResult['commandStatus']
|
||||
? this::handler(commandResult.parsed['cliOutput'], sr)
|
||||
return commandResult.commandStatus
|
||||
? this::handler(commandResult.parsed.cliOutput, sr)
|
||||
: commandResult
|
||||
}
|
||||
}
|
||||
|
||||
async function profileType(sr) {
|
||||
async function parseProfile(parsed) {
|
||||
const volume = parsed['volProfile']
|
||||
volume['bricks'] = ensureArray(volume['brick'])
|
||||
delete volume['brick']
|
||||
const volume = parsed.volProfile
|
||||
volume.bricks = ensureArray(volume.brick)
|
||||
delete volume.brick
|
||||
return { commandStatus: true, result: volume }
|
||||
}
|
||||
|
||||
@@ -143,9 +143,9 @@ function createVolumeInfoTypes() {
|
||||
|
||||
async function profileTopType(sr) {
|
||||
async function parseTop(parsed) {
|
||||
const volume = parsed['volTop']
|
||||
volume['bricks'] = ensureArray(volume['brick'])
|
||||
delete volume['brick']
|
||||
const volume = parsed.volTop
|
||||
volume.bricks = ensureArray(volume.brick)
|
||||
delete volume.brick
|
||||
return { commandStatus: true, result: volume }
|
||||
}
|
||||
|
||||
@@ -326,7 +326,7 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
|
||||
}
|
||||
messageArray.push(`${key}: ${result[key]}`)
|
||||
}
|
||||
messageArray.push('command: ' + result['command'].join(' '))
|
||||
messageArray.push('command: ' + result.command.join(' '))
|
||||
messageKeys.splice(messageKeys.indexOf('command'), 1)
|
||||
for (const key of messageKeys) {
|
||||
messageArray.push(`${key}: ${JSON.stringify(result[key])}`)
|
||||
@@ -343,7 +343,7 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
|
||||
})
|
||||
break
|
||||
} catch (exception) {
|
||||
if (exception['code'] !== 'HOST_OFFLINE') {
|
||||
if (exception.code !== 'HOST_OFFLINE') {
|
||||
throw exception
|
||||
}
|
||||
}
|
||||
@@ -370,19 +370,17 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
|
||||
}
|
||||
|
||||
function findErrorMessage(commandResut) {
|
||||
if (commandResut['exit'] === 0 && commandResut.parsed) {
|
||||
const cliOut = commandResut.parsed['cliOutput']
|
||||
if (cliOut['opErrstr'] && cliOut['opErrstr'].length) {
|
||||
return cliOut['opErrstr']
|
||||
if (commandResut.exit === 0 && commandResut.parsed) {
|
||||
const cliOut = commandResut.parsed.cliOutput
|
||||
if (cliOut.opErrstr && cliOut.opErrstr.length) {
|
||||
return cliOut.opErrstr
|
||||
}
|
||||
// "peer probe" returns it's "already in peer" error in cliOutput/output
|
||||
if (cliOut['output'] && cliOut['output'].length) {
|
||||
return cliOut['output']
|
||||
if (cliOut.output && cliOut.output.length) {
|
||||
return cliOut.output
|
||||
}
|
||||
}
|
||||
return commandResut['stderr'].length
|
||||
? commandResut['stderr']
|
||||
: commandResut['stdout']
|
||||
return commandResut.stderr.length ? commandResut.stderr : commandResut.stdout
|
||||
}
|
||||
|
||||
async function glusterCmd(glusterEndpoint, cmd, ignoreError = false) {
|
||||
@@ -392,15 +390,15 @@ async function glusterCmd(glusterEndpoint, cmd, ignoreError = false) {
|
||||
true
|
||||
)
|
||||
try {
|
||||
result.parsed = parseXml(result['stdout'])
|
||||
result.parsed = parseXml(result.stdout)
|
||||
} catch (e) {
|
||||
// pass, we never know if a message can be parsed or not, so we just try
|
||||
}
|
||||
if (result['exit'] === 0) {
|
||||
const cliOut = result.parsed['cliOutput']
|
||||
if (result.exit === 0) {
|
||||
const cliOut = result.parsed.cliOutput
|
||||
// we have found cases where opErrno is !=0 and opRet was 0, albeit the operation was an error.
|
||||
result.commandStatus =
|
||||
cliOut['opRet'].trim() === '0' && cliOut['opErrno'].trim() === '0'
|
||||
cliOut.opRet.trim() === '0' && cliOut.opErrno.trim() === '0'
|
||||
result.error = findErrorMessage(result)
|
||||
} else {
|
||||
result.commandStatus = false
|
||||
@@ -793,7 +791,7 @@ export const createSR = defer(async function(
|
||||
host: param.host.$id,
|
||||
vm: { id: param.vm.$id, ip: param.address },
|
||||
underlyingSr: param.underlyingSr.$id,
|
||||
arbiter: !!param['arbiter'],
|
||||
arbiter: !!param.arbiter,
|
||||
}))
|
||||
await xapi.xo.setData(xosanSrRef, 'xosan_config', {
|
||||
version: 'beta2',
|
||||
@@ -1300,7 +1298,7 @@ export const addBricks = defer(async function(
|
||||
underlyingSr: newSr,
|
||||
})
|
||||
}
|
||||
const arbiterNode = data.nodes.find(n => n['arbiter'])
|
||||
const arbiterNode = data.nodes.find(n => n.arbiter)
|
||||
if (arbiterNode) {
|
||||
await glusterCmd(
|
||||
glusterEndpoint,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import Model from './model'
|
||||
import { BaseError } from 'make-error'
|
||||
import { EventEmitter } from 'events'
|
||||
import { isArray, isObject, map } from './utils'
|
||||
import { isObject, map } from './utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -30,7 +30,7 @@ export default class Collection extends EventEmitter {
|
||||
}
|
||||
|
||||
async add(models, opts) {
|
||||
const array = isArray(models)
|
||||
const array = Array.isArray(models)
|
||||
if (!array) {
|
||||
models = [models]
|
||||
}
|
||||
@@ -66,7 +66,7 @@ export default class Collection extends EventEmitter {
|
||||
}
|
||||
|
||||
async remove(ids) {
|
||||
if (!isArray(ids)) {
|
||||
if (!Array.isArray(ids)) {
|
||||
ids = [ids]
|
||||
}
|
||||
|
||||
@@ -77,8 +77,8 @@ export default class Collection extends EventEmitter {
|
||||
}
|
||||
|
||||
async update(models) {
|
||||
const array = isArray(models)
|
||||
if (!isArray(models)) {
|
||||
const array = Array.isArray(models)
|
||||
if (!array) {
|
||||
models = [models]
|
||||
}
|
||||
|
||||
|
||||
@@ -29,13 +29,7 @@ import { ensureDir, readdir, readFile } from 'fs-extra'
|
||||
|
||||
import parseDuration from './_parseDuration'
|
||||
import Xo from './xo'
|
||||
import {
|
||||
forEach,
|
||||
isArray,
|
||||
isFunction,
|
||||
mapToArray,
|
||||
pFromCallback,
|
||||
} from './utils'
|
||||
import { forEach, mapToArray, pFromCallback } from './utils'
|
||||
|
||||
import bodyParser from 'body-parser'
|
||||
import connectFlash from 'connect-flash'
|
||||
@@ -281,15 +275,16 @@ async function registerPlugin(pluginPath, pluginName) {
|
||||
|
||||
// The default export can be either a factory or directly a plugin
|
||||
// instance.
|
||||
const instance = isFunction(factory)
|
||||
? factory({
|
||||
xo: this,
|
||||
getDataDir: () => {
|
||||
const dir = `${this._config.datadir}/${pluginName}`
|
||||
return ensureDir(dir).then(() => dir)
|
||||
},
|
||||
})
|
||||
: factory
|
||||
const instance =
|
||||
typeof factory === 'function'
|
||||
? factory({
|
||||
xo: this,
|
||||
getDataDir: () => {
|
||||
const dir = `${this._config.datadir}/${pluginName}`
|
||||
return ensureDir(dir).then(() => dir)
|
||||
},
|
||||
})
|
||||
: factory
|
||||
|
||||
await this.registerPlugin(
|
||||
pluginName,
|
||||
@@ -468,7 +463,7 @@ const setUpProxies = (express, opts, xo) => {
|
||||
|
||||
const setUpStaticFiles = (express, opts) => {
|
||||
forEach(opts, (paths, url) => {
|
||||
if (!isArray(paths)) {
|
||||
if (!Array.isArray(paths)) {
|
||||
paths = [paths]
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import ndjson from 'ndjson'
|
||||
import parseArgs from 'minimist'
|
||||
import sublevel from 'level-sublevel'
|
||||
import util from 'util'
|
||||
import { join as joinPath } from 'path'
|
||||
import { repair as repairDb } from 'level'
|
||||
|
||||
import { forEach } from './utils'
|
||||
@@ -174,6 +175,7 @@ export default async function main() {
|
||||
}
|
||||
|
||||
const config = await appConf.load('xo-server', {
|
||||
appDir: joinPath(__dirname, '..'),
|
||||
ignoreUnknownFormats: true,
|
||||
})
|
||||
|
||||
|
||||
@@ -8,19 +8,21 @@ const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => async (fields, ...args) => {
return splitLines(
await execa.stdout(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
const { stdout } = await execa(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])

return splitLines(stdout).map(
Array.isArray(fields) ? parse : line => parse(line)[fields]
)
}

export const lvs = makeFunction('lvs')

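The rewrite above follows the execa 1 to 2 migration visible in the dependency bumps elsewhere in this diff: execa.stdout(cmd, args) no longer exists, and the returned promise now resolves to a result object whose stdout property is destructured. A minimal standalone illustration; the echo command here is just an example, not taken from the changeset.

import execa from 'execa'

async function demo() {
  // execa@1: const stdout = await execa.stdout('echo', ['hello'])
  // execa@2: the promise resolves to a result object instead
  const { stdout } = await execa('echo', ['hello'])
  console.log(stdout) // 'hello'
}

demo()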
@@ -1,5 +1,3 @@
|
||||
import assign from 'lodash/assign'
|
||||
|
||||
const _combine = (vectors, n, cb) => {
|
||||
if (!n) {
|
||||
return
|
||||
@@ -35,7 +33,7 @@ export const combine = vectors => cb => _combine(vectors, vectors.length, cb)
|
||||
// Merge the properties of an objects set in one object.
|
||||
//
|
||||
// Ex: mergeObjects([ { a: 1 }, { b: 2 } ]) => { a: 1, b: 2 }
|
||||
export const mergeObjects = objects => assign({}, ...objects)
|
||||
export const mergeObjects = objects => Object.assign({}, ...objects)
|
||||
|
||||
// Compute a cross product between vectors.
|
||||
//
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { EventEmitter } from 'events'
|
||||
|
||||
import { forEach, isEmpty, isString } from './utils'
|
||||
import { forEach, isEmpty } from './utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -30,7 +30,7 @@ export default class Model extends EventEmitter {
|
||||
set(properties, value) {
|
||||
// This method can also be used with two arguments to set a single
|
||||
// property.
|
||||
if (isString(properties)) {
|
||||
if (typeof properties === 'string') {
|
||||
properties = { [properties]: value }
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import appConf from 'app-conf'
|
||||
import pw from 'pw'
|
||||
import { join as joinPath } from 'path'
|
||||
|
||||
import Xo from './xo'
|
||||
import { generateToken } from './utils'
|
||||
@@ -26,6 +27,7 @@ xo-server-recover-account <user name or email>
|
||||
|
||||
const xo = new Xo(
|
||||
await appConf.load('xo-server', {
|
||||
appDir: joinPath(__dirname, '..'),
|
||||
ignoreUnknownFormats: true,
|
||||
})
|
||||
)
|
||||
|
||||
@@ -3,7 +3,6 @@ import forEach from 'lodash/forEach'
|
||||
import has from 'lodash/has'
|
||||
import highland from 'highland'
|
||||
import humanFormat from 'human-format'
|
||||
import isString from 'lodash/isString'
|
||||
import keys from 'lodash/keys'
|
||||
import multiKeyHashInt from 'multikey-hash'
|
||||
import pick from 'lodash/pick'
|
||||
@@ -208,7 +207,7 @@ export {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function parseSize(size) {
|
||||
if (!isString(size)) {
|
||||
if (typeof size !== 'string') {
|
||||
return size
|
||||
}
|
||||
|
||||
@@ -256,13 +255,9 @@ export const safeDateParse = utcParse('%Y%m%dT%H%M%SZ')
|
||||
//
|
||||
// Exports them from here to avoid direct dependencies on lodash/
|
||||
export { default as forEach } from 'lodash/forEach'
|
||||
export { default as isArray } from 'lodash/isArray'
|
||||
export { default as isBoolean } from 'lodash/isBoolean'
|
||||
export { default as isEmpty } from 'lodash/isEmpty'
|
||||
export { default as isFunction } from 'lodash/isFunction'
|
||||
export { default as isInteger } from 'lodash/isInteger'
|
||||
export { default as isObject } from 'lodash/isObject'
|
||||
export { default as isString } from 'lodash/isString'
|
||||
export { default as mapToArray } from 'lodash/map'
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
@@ -364,7 +359,7 @@ export const thunkToArray = thunk => {
|
||||
// function foo (param = throwFn('param is required')()) {}
|
||||
// ```
|
||||
export const throwFn = error => () => {
|
||||
throw isString(error) ? new Error(error) : error
|
||||
throw typeof error === 'string' ? new Error(error) : error
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
@@ -3,7 +3,6 @@ import ensureArray from './_ensureArray'
|
||||
import {
|
||||
extractProperty,
|
||||
forEach,
|
||||
isArray,
|
||||
isEmpty,
|
||||
mapFilter,
|
||||
mapToArray,
|
||||
@@ -27,7 +26,7 @@ function link(obj, prop, idField = '$id') {
|
||||
return dynamicValue // Properly handles null and undefined.
|
||||
}
|
||||
|
||||
if (isArray(dynamicValue)) {
|
||||
if (Array.isArray(dynamicValue)) {
|
||||
return mapToArray(dynamicValue, idField)
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,6 @@ import pRetry from '../_pRetry'
|
||||
import {
|
||||
camelToSnakeCase,
|
||||
forEach,
|
||||
isFunction,
|
||||
map,
|
||||
mapToArray,
|
||||
pAll,
|
||||
@@ -82,7 +81,7 @@ export const TAG_COPY_SRC = 'xo:copy_of'
|
||||
|
||||
// FIXME: remove this work around when fixed, https://phabricator.babeljs.io/T2877
|
||||
// export * from './utils'
|
||||
require('lodash/assign')(module.exports, require('./utils'))
|
||||
Object.assign(module.exports, require('./utils'))
|
||||
|
||||
// VDI formats. (Raw is not available for delta vdi.)
|
||||
export const VDI_FORMAT_VHD = 'vhd'
|
||||
@@ -174,7 +173,7 @@ export default class Xapi extends XapiBase {
|
||||
//
|
||||
// TODO: implements a timeout.
|
||||
_waitObject(predicate) {
|
||||
if (isFunction(predicate)) {
|
||||
if (typeof predicate === 'function') {
|
||||
const { promise, resolve } = defer()
|
||||
|
||||
const unregister = this._registerGenericWatcher(obj => {
|
||||
@@ -1576,7 +1575,7 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
} else {
|
||||
// Find the original template by name (*sigh*).
|
||||
const templateNameLabel = vm.other_config['base_template_name']
|
||||
const templateNameLabel = vm.other_config.base_template_name
|
||||
const template =
|
||||
templateNameLabel &&
|
||||
find(
|
||||
|
||||
@@ -1,4 +1,3 @@
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import deferrable from 'golike-defer'
import unzip from 'julien-f-unzip'
@@ -337,7 +336,7 @@ export default {

// INSTALL -------------------------------------------------------------------

_xcpUpdate(hosts) {
async _xcpUpdate(hosts) {
if (hosts === undefined) {
hosts = filter(this.objects.all, { $type: 'host' })
} else {
@@ -347,7 +346,10 @@ export default {
)
}

return asyncMap(hosts, async host => {
// XCP-ng hosts need to be updated one at a time starting with the pool master
// https://github.com/vatesfr/xen-orchestra/issues/4468
hosts = hosts.sort(({ $ref }) => ($ref === this.pool.master ? -1 : 1))
for (const host of hosts) {
const update = await this.call(
'host.call_plugin',
host.$ref,
@@ -364,7 +366,7 @@ export default {
String(Date.now() / 1000)
)
}
})
}
},

// Legacy XS patches: upload a patch on a pool before installing it

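The change above replaces the concurrent asyncMap with a sequential loop, ordered so that the pool master is updated first (see the linked issue 4468). A minimal sketch of that ordering pattern, with a hypothetical updateHost callback standing in for the host.call_plugin call used in the real code:

// Sketch: update the pool master first, then the other hosts one at a time.
async function updateSequentially(hosts, masterRef, updateHost) {
  const ordered = hosts
    .slice()
    .sort((a, b) => (a.$ref === masterRef ? -1 : b.$ref === masterRef ? 1 : 0))
  for (const host of ordered) {
    await updateHost(host) // each host finishes before the next one starts
  }
}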
@@ -9,11 +9,7 @@ import { satisfies as versionSatisfies } from 'semver'
|
||||
import {
|
||||
camelToSnakeCase,
|
||||
forEach,
|
||||
isArray,
|
||||
isBoolean,
|
||||
isFunction,
|
||||
isInteger,
|
||||
isString,
|
||||
map,
|
||||
mapFilter,
|
||||
mapToArray,
|
||||
@@ -45,10 +41,10 @@ export const prepareXapiParam = param => {
|
||||
if (isInteger(param)) {
|
||||
return asInteger(param)
|
||||
}
|
||||
if (isBoolean(param)) {
|
||||
if (typeof param === 'boolean') {
|
||||
return asBoolean(param)
|
||||
}
|
||||
if (isArray(param)) {
|
||||
if (Array.isArray(param)) {
|
||||
return map(param, prepareXapiParam)
|
||||
}
|
||||
if (isPlainObject(param)) {
|
||||
@@ -135,14 +131,14 @@ export const makeEditObject = specs => {
|
||||
return object => object[prop]
|
||||
}
|
||||
|
||||
if (isString(get)) {
|
||||
if (typeof get === 'string') {
|
||||
return object => object[get]
|
||||
}
|
||||
|
||||
return get
|
||||
}
|
||||
const normalizeSet = (set, name) => {
|
||||
if (isFunction(set)) {
|
||||
if (typeof set === 'function') {
|
||||
return set
|
||||
}
|
||||
|
||||
@@ -153,7 +149,7 @@ export const makeEditObject = specs => {
|
||||
}
|
||||
}
|
||||
|
||||
if (isString(set)) {
|
||||
if (typeof set === 'string') {
|
||||
const index = set.indexOf('.')
|
||||
if (index === -1) {
|
||||
const prop = camelToSnakeCase(set)
|
||||
@@ -176,7 +172,7 @@ export const makeEditObject = specs => {
|
||||
}
|
||||
}
|
||||
|
||||
if (!isArray(set)) {
|
||||
if (!Array.isArray(set)) {
|
||||
throw new Error('must be an array, a function or a string')
|
||||
}
|
||||
|
||||
@@ -212,7 +208,7 @@ export const makeEditObject = specs => {
|
||||
}
|
||||
|
||||
forEach(spec.constraints, (constraint, constraintName) => {
|
||||
if (!isFunction(constraint)) {
|
||||
if (typeof constraint !== 'function') {
|
||||
throw new Error('constraint must be a function')
|
||||
}
|
||||
|
||||
@@ -234,15 +230,15 @@ export const makeEditObject = specs => {
|
||||
return spec
|
||||
}
|
||||
forEach(specs, (spec, name) => {
|
||||
isString(spec) || (specs[name] = normalizeSpec(spec, name))
|
||||
typeof spec === 'string' || (specs[name] = normalizeSpec(spec, name))
|
||||
})
|
||||
|
||||
// Resolves aliases and add camelCase and snake_case aliases.
|
||||
forEach(specs, (spec, name) => {
|
||||
if (isString(spec)) {
|
||||
if (typeof spec === 'string') {
|
||||
do {
|
||||
spec = specs[spec]
|
||||
} while (isString(spec))
|
||||
} while (typeof spec === 'string')
|
||||
specs[name] = spec
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ import createLogger from '@xen-orchestra/log'
|
||||
import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import schemaInspector from 'schema-inspector'
|
||||
import { forEach, isFunction } from 'lodash'
|
||||
import { forEach } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { MethodNotFound } from 'json-rpc-peer'
|
||||
|
||||
@@ -183,7 +183,7 @@ export default class Api {
|
||||
const addMethod = (method, name) => {
|
||||
name = base + name
|
||||
|
||||
if (isFunction(method)) {
|
||||
if (typeof method === 'function') {
|
||||
removes.push(this.addApiMethod(name, method))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -53,7 +53,7 @@ import {
|
||||
type Xapi,
|
||||
TAG_COPY_SRC,
|
||||
} from '../../xapi'
|
||||
import { getVmDisks } from '../../xapi/utils'
|
||||
import { formatDateTime, getVmDisks } from '../../xapi/utils'
|
||||
import {
|
||||
resolveRelativeFromFile,
|
||||
safeDateFormat,
|
||||
@@ -75,6 +75,7 @@ type Settings = {|
|
||||
deleteFirst?: boolean,
|
||||
copyRetention?: number,
|
||||
exportRetention?: number,
|
||||
offlineBackup?: boolean,
|
||||
offlineSnapshot?: boolean,
|
||||
reportWhen?: ReportWhen,
|
||||
snapshotRetention?: number,
|
||||
@@ -147,6 +148,7 @@ const defaultSettings: Settings = {
|
||||
deleteFirst: false,
|
||||
exportRetention: 0,
|
||||
fullInterval: 0,
|
||||
offlineBackup: false,
|
||||
offlineSnapshot: false,
|
||||
reportWhen: 'failure',
|
||||
snapshotRetention: 0,
|
||||
@@ -188,7 +190,7 @@ const getJobCompression = ({ compression: c }) =>
|
||||
const listReplicatedVms = (
|
||||
xapi: Xapi,
|
||||
scheduleOrJobId: string,
|
||||
srId?: string,
|
||||
srUuid?: string,
|
||||
vmUuid?: string
|
||||
): Vm[] => {
|
||||
const { all } = xapi.objects
|
||||
@@ -203,7 +205,7 @@ const listReplicatedVms = (
|
||||
'start' in object.blocked_operations &&
|
||||
(oc['xo:backup:job'] === scheduleOrJobId ||
|
||||
oc['xo:backup:schedule'] === scheduleOrJobId) &&
|
||||
oc['xo:backup:sr'] === srId &&
|
||||
oc['xo:backup:sr'] === srUuid &&
|
||||
(oc['xo:backup:vm'] === vmUuid ||
|
||||
// 2018-03-28, JFT: to catch VMs replicated before this fix
|
||||
oc['xo:backup:vm'] === undefined)
|
||||
@@ -479,16 +481,21 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
// Attributes on created VM snapshots:
//
// - `other_config`:
//    - `xo:backup:datetime` = snapshot.snapshot_time (allow sorting replicated VMs)
//    - `xo:backup:deltaChainLength` = n (number of delta copies/replicated since a full)
//    - `xo:backup:exported` = 'true' (added at the end of the backup)
//
// Attributes on created VMs and created snapshots:
//
// - `other_config`:
//    - `xo:backup:datetime`: format is UTC %Y%m%dT%H:%M:%SZ
//       - from snapshots: snapshot.snapshot_time
//       - with offline backup: formatDateTime(Date.now())
//    - `xo:backup:job` = job.id
//    - `xo:backup:schedule` = schedule.id
//    - `xo:backup:vm` = vm.uuid
//
// Attributes of created VMs:
//
// - all snapshots attributes (see above)
// - `name_label`: `${original name} - ${job name} - (${safeDateFormat(backup timestamp)})`
// - tag:
//    - copy in delta mode: `Continuous Replication`
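To make the convention above concrete, this is roughly what other_config could look like on a snapshot created by a scheduled job; all identifier values below are placeholders, not real data.

// Hypothetical other_config entries following the convention documented above.
const otherConfig = {
  'xo:backup:datetime': '20191015T08:30:00Z', // UTC %Y%m%dT%H:%M:%SZ
  'xo:backup:deltaChainLength': '2', // delta copies since the last full
  'xo:backup:exported': 'true', // set once the export has completed
  'xo:backup:job': '<job id>',
  'xo:backup:schedule': '<schedule id>',
  'xo:backup:vm': '<vm uuid>',
}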
@@ -1023,6 +1030,12 @@ export default class BackupNg {
throw new Error('copy, export and snapshot retentions cannot both be 0')
}

const isOfflineBackup =
mode === 'full' && getSetting(settings, 'offlineBackup', [vmUuid, ''])
if (isOfflineBackup && snapshotRetention > 0) {
throw new Error('offline backup is not compatible with rolling snapshot')
}

if (
!some(
vm.$VBDs,
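getSetting(settings, 'offlineBackup', [vmUuid, '']) resolves a setting with per-VM precedence: the VM-specific entry wins over the job-wide one (the empty-string key). The helper is defined elsewhere in this file; the snippet below is only a rough sketch of the lookup it is assumed to perform, not its actual implementation.

// Sketch only: return the first defined value among the given keys.
const getSetting = (settings, name, keys) => {
  for (const key of keys) {
    const setting = settings[key]
    if (setting !== undefined && setting[name] !== undefined) {
      return setting[name]
    }
  }
}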
@@ -1032,110 +1045,139 @@ export default class BackupNg {
|
||||
throw new Error('no disks found')
|
||||
}
|
||||
|
||||
const snapshots = vm.$snapshots
|
||||
.filter(_ => _.other_config['xo:backup:job'] === jobId)
|
||||
.sort(compareSnapshotTime)
|
||||
let baseSnapshot, exported: Vm, exportDateTime
|
||||
if (isOfflineBackup) {
|
||||
exported = vm
|
||||
exportDateTime = formatDateTime(Date.now())
|
||||
if (vm.power_state === 'Running') {
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'shutdown VM',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.shutdownVm(vm)
|
||||
)
|
||||
$defer(() => xapi.startVm(vm))
|
||||
}
|
||||
} else {
|
||||
const snapshots = vm.$snapshots
|
||||
.filter(_ => _.other_config['xo:backup:job'] === jobId)
|
||||
.sort(compareSnapshotTime)
|
||||
|
||||
const bypassVdiChainsCheck: boolean = getSetting(
|
||||
settings,
|
||||
'bypassVdiChainsCheck',
|
||||
[vmUuid, '']
|
||||
)
|
||||
if (!bypassVdiChainsCheck) {
|
||||
xapi._assertHealthyVdiChains(vm)
|
||||
}
|
||||
const bypassVdiChainsCheck: boolean = getSetting(
|
||||
settings,
|
||||
'bypassVdiChainsCheck',
|
||||
[vmUuid, '']
|
||||
)
|
||||
if (!bypassVdiChainsCheck) {
|
||||
xapi._assertHealthyVdiChains(vm)
|
||||
}
|
||||
|
||||
const offlineSnapshot: boolean = getSetting(settings, 'offlineSnapshot', [
|
||||
vmUuid,
|
||||
'',
|
||||
])
|
||||
const startAfterSnapshot = offlineSnapshot && vm.power_state === 'Running'
|
||||
if (startAfterSnapshot) {
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'shutdown VM',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.shutdownVm(vm)
|
||||
)
|
||||
}
|
||||
|
||||
exported = (await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'snapshot',
|
||||
parentId: taskId,
|
||||
result: _ => _.uuid,
|
||||
},
|
||||
xapi._snapshotVm(
|
||||
$cancelToken,
|
||||
vm,
|
||||
`[XO Backup ${job.name}] ${vm.name_label}`
|
||||
)
|
||||
): any)
|
||||
|
||||
if (startAfterSnapshot) {
|
||||
ignoreErrors.call(xapi.startVm(vm))
|
||||
}
|
||||
|
||||
const offlineSnapshot: boolean = getSetting(settings, 'offlineSnapshot', [
|
||||
vmUuid,
|
||||
'',
|
||||
])
|
||||
const startAfterSnapshot = offlineSnapshot && vm.power_state === 'Running'
|
||||
if (startAfterSnapshot) {
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'shutdown VM',
|
||||
message: 'add metadata to snapshot',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.shutdownVm(vm)
|
||||
)
|
||||
}
|
||||
|
||||
let snapshot: Vm = (await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'snapshot',
|
||||
parentId: taskId,
|
||||
result: _ => _.uuid,
|
||||
},
|
||||
xapi._snapshotVm(
|
||||
$cancelToken,
|
||||
vm,
|
||||
`[XO Backup ${job.name}] ${vm.name_label}`
|
||||
)
|
||||
): any)
|
||||
|
||||
if (startAfterSnapshot) {
|
||||
ignoreErrors.call(xapi.startVm(vm))
|
||||
}
|
||||
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'add metadata to snapshot',
|
||||
parentId: taskId,
|
||||
},
|
||||
snapshot.update_other_config({
|
||||
'xo:backup:datetime': snapshot.snapshot_time,
|
||||
'xo:backup:job': jobId,
|
||||
'xo:backup:schedule': scheduleId,
|
||||
'xo:backup:vm': vmUuid,
|
||||
})
|
||||
)
|
||||
|
||||
snapshot = await xapi.barrier(snapshot.$ref)
|
||||
|
||||
let baseSnapshot
|
||||
if (mode === 'delta') {
|
||||
baseSnapshot = findLast(
|
||||
snapshots,
|
||||
_ => 'xo:backup:exported' in _.other_config
|
||||
exported.update_other_config({
|
||||
'xo:backup:datetime': exported.snapshot_time,
|
||||
'xo:backup:job': jobId,
|
||||
'xo:backup:schedule': scheduleId,
|
||||
'xo:backup:vm': vmUuid,
|
||||
})
|
||||
)
|
||||
|
||||
// JFT 2018-10-02: support previous snapshots which did not have this
|
||||
// entry, can be removed after 2018-12.
|
||||
if (baseSnapshot === undefined) {
|
||||
baseSnapshot = last(snapshots)
|
||||
}
|
||||
}
|
||||
snapshots.push(snapshot)
|
||||
exported = await xapi.barrier(exported.$ref)
|
||||
|
||||
// snapshots to delete due to the snapshot retention settings
|
||||
const snapshotsToDelete = flatMap(
|
||||
groupBy(snapshots, _ => _.other_config['xo:backup:schedule']),
|
||||
(snapshots, scheduleId) =>
|
||||
getOldEntries(
|
||||
getSetting(settings, 'snapshotRetention', [scheduleId]),
|
||||
snapshots
|
||||
if (mode === 'delta') {
|
||||
baseSnapshot = findLast(
|
||||
snapshots,
|
||||
_ => 'xo:backup:exported' in _.other_config
|
||||
)
|
||||
)
|
||||
|
||||
// delete unused snapshots
|
||||
await asyncMap(snapshotsToDelete, vm => {
|
||||
// snapshot and baseSnapshot should not be deleted right now
|
||||
if (vm !== snapshot && vm !== baseSnapshot) {
|
||||
return xapi.deleteVm(vm)
|
||||
// JFT 2018-10-02: support previous snapshots which did not have this
|
||||
// entry, can be removed after 2018-12.
|
||||
if (baseSnapshot === undefined) {
|
||||
baseSnapshot = last(snapshots)
|
||||
}
|
||||
}
|
||||
})
|
||||
snapshots.push(exported)
|
||||
|
||||
snapshot = ((await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'waiting for uptodate snapshot record',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.barrier(snapshot.$ref)
|
||||
): any): Vm)
|
||||
// snapshots to delete due to the snapshot retention settings
|
||||
const snapshotsToDelete = flatMap(
|
||||
groupBy(snapshots, _ => _.other_config['xo:backup:schedule']),
|
||||
(snapshots, scheduleId) =>
|
||||
getOldEntries(
|
||||
getSetting(settings, 'snapshotRetention', [scheduleId]),
|
||||
snapshots
|
||||
)
|
||||
)
|
||||
|
||||
// delete unused snapshots
|
||||
await asyncMap(snapshotsToDelete, vm => {
|
||||
// snapshot and baseSnapshot should not be deleted right now
|
||||
if (vm !== exported && vm !== baseSnapshot) {
|
||||
return xapi.deleteVm(vm)
|
||||
}
|
||||
})
|
||||
|
||||
exported = ((await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'waiting for uptodate snapshot record',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.barrier(exported.$ref)
|
||||
): any): Vm)
|
||||
|
||||
if (mode === 'full' && snapshotsToDelete.includes(exported)) {
|
||||
// TODO: do not create the snapshot if there are no snapshotRetention and
|
||||
// the VM is not running
|
||||
$defer.call(xapi, 'deleteVm', exported)
|
||||
} else if (mode === 'delta') {
|
||||
if (snapshotsToDelete.includes(exported)) {
|
||||
$defer.onFailure.call(xapi, 'deleteVm', exported)
|
||||
}
|
||||
if (snapshotsToDelete.includes(baseSnapshot)) {
|
||||
$defer.onSuccess.call(xapi, 'deleteVm', baseSnapshot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (copyRetention === 0 && exportRetention === 0) {
|
||||
return
|
||||
@@ -1151,14 +1193,8 @@ export default class BackupNg {
|
||||
const metadataFilename = `${vmDir}/${basename}.json`
|
||||
|
||||
if (mode === 'full') {
|
||||
// TODO: do not create the snapshot if there are no snapshotRetention and
|
||||
// the VM is not running
|
||||
if (snapshotsToDelete.includes(snapshot)) {
|
||||
$defer.call(xapi, 'deleteVm', snapshot)
|
||||
}
|
||||
|
||||
let compress = getJobCompression(job)
|
||||
const pool = snapshot.$pool
|
||||
const pool = exported.$pool
|
||||
if (
|
||||
compress === 'zstd' &&
|
||||
pool.restrictions.restrict_zstd_export !== 'false'
|
||||
@@ -1175,10 +1211,10 @@ export default class BackupNg {
|
||||
let xva: any = await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'start snapshot export',
|
||||
message: 'start VM export',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.exportVm($cancelToken, snapshot, {
|
||||
xapi.exportVm($cancelToken, exported, {
|
||||
compress,
|
||||
})
|
||||
)
|
||||
@@ -1203,7 +1239,7 @@ export default class BackupNg {
|
||||
timestamp: now,
|
||||
version: '2.0.0',
|
||||
vm,
|
||||
vmSnapshot: snapshot,
|
||||
vmSnapshot: exported.id !== vm.id ? exported : undefined,
|
||||
xva: `./${dataBasename}`,
|
||||
}
|
||||
const dataFilename = `${vmDir}/${dataBasename}`
|
||||
@@ -1287,7 +1323,7 @@ export default class BackupNg {
|
||||
async (taskId, sr) => {
|
||||
const fork = forkExport()
|
||||
|
||||
const { $id: srId, xapi } = sr
|
||||
const { uuid: srUuid, xapi } = sr
|
||||
|
||||
// delete previous interrupted copies
|
||||
ignoreErrors.call(
|
||||
@@ -1299,7 +1335,7 @@ export default class BackupNg {
|
||||
|
||||
const oldVms = getOldEntries(
|
||||
copyRetention - 1,
|
||||
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
|
||||
listReplicatedVms(xapi, scheduleId, srUuid, vmUuid)
|
||||
)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
@@ -1311,7 +1347,9 @@ export default class BackupNg {
|
||||
},
|
||||
this._deleteVms(xapi, oldVms)
|
||||
)
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [
|
||||
srUuid,
|
||||
])
|
||||
if (deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
@@ -1341,7 +1379,15 @@ export default class BackupNg {
|
||||
'start',
|
||||
'Start operation for this vm is blocked, clone it if you want to use it.'
|
||||
),
|
||||
vm.update_other_config('xo:backup:sr', srId),
|
||||
!isOfflineBackup
|
||||
? vm.update_other_config('xo:backup:sr', srUuid)
|
||||
: vm.update_other_config({
|
||||
'xo:backup:datetime': exportDateTime,
|
||||
'xo:backup:job': jobId,
|
||||
'xo:backup:schedule': scheduleId,
|
||||
'xo:backup:sr': srUuid,
|
||||
'xo:backup:vm': exported.uuid,
|
||||
}),
|
||||
])
|
||||
|
||||
if (!deleteFirst) {
|
||||
@@ -1354,13 +1400,6 @@ export default class BackupNg {
|
||||
noop // errors are handled in logs
|
||||
)
|
||||
} else if (mode === 'delta') {
|
||||
if (snapshotsToDelete.includes(snapshot)) {
|
||||
$defer.onFailure.call(xapi, 'deleteVm', snapshot)
|
||||
}
|
||||
if (snapshotsToDelete.includes(baseSnapshot)) {
|
||||
$defer.onSuccess.call(xapi, 'deleteVm', baseSnapshot)
|
||||
}
|
||||
|
||||
let deltaChainLength = 0
|
||||
let fullVdisRequired
|
||||
await (async () => {
|
||||
@@ -1398,11 +1437,11 @@ export default class BackupNg {
|
||||
}
|
||||
})
|
||||
|
||||
for (const { $id: srId, xapi } of srs) {
|
||||
for (const { uuid: srUuid, xapi } of srs) {
|
||||
const replicatedVm = listReplicatedVms(
|
||||
xapi,
|
||||
jobId,
|
||||
srId,
|
||||
srUuid,
|
||||
vmUuid
|
||||
).find(vm => vm.other_config[TAG_COPY_SRC] === baseSnapshot.uuid)
|
||||
if (replicatedVm === undefined) {
|
||||
@@ -1468,7 +1507,7 @@ export default class BackupNg {
|
||||
message: 'start snapshot export',
|
||||
parentId: taskId,
|
||||
},
|
||||
xapi.exportDeltaVm($cancelToken, snapshot, baseSnapshot, {
|
||||
xapi.exportDeltaVm($cancelToken, exported, baseSnapshot, {
|
||||
fullVdisRequired,
|
||||
})
|
||||
)
|
||||
@@ -1490,7 +1529,7 @@ export default class BackupNg {
|
||||
}/${basename}.vhd`
|
||||
),
|
||||
vm,
|
||||
vmSnapshot: snapshot,
|
||||
vmSnapshot: exported,
|
||||
}
|
||||
|
||||
const jsonMetadata = JSON.stringify(metadata)
|
||||
@@ -1656,7 +1695,7 @@ export default class BackupNg {
|
||||
async (taskId, sr) => {
|
||||
const fork = forkExport()
|
||||
|
||||
const { $id: srId, xapi } = sr
|
||||
const { uuid: srUuid, xapi } = sr
|
||||
|
||||
// delete previous interrupted copies
|
||||
ignoreErrors.call(
|
||||
@@ -1668,7 +1707,7 @@ export default class BackupNg {
|
||||
|
||||
const oldVms = getOldEntries(
|
||||
copyRetention - 1,
|
||||
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
|
||||
listReplicatedVms(xapi, scheduleId, srUuid, vmUuid)
|
||||
)
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
@@ -1681,7 +1720,9 @@ export default class BackupNg {
|
||||
this._deleteVms(xapi, oldVms)
|
||||
)
|
||||
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
|
||||
const deleteFirst = getSetting(settings, 'deleteFirst', [
|
||||
srUuid,
|
||||
])
|
||||
if (deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
@@ -1698,7 +1739,7 @@ export default class BackupNg {
|
||||
name_label: `${metadata.vm.name_label} - ${
|
||||
job.name
|
||||
} - (${safeDateFormat(metadata.timestamp)})`,
|
||||
srId,
|
||||
srId: sr.$id,
|
||||
})
|
||||
)
|
||||
|
||||
@@ -1709,7 +1750,7 @@ export default class BackupNg {
|
||||
'start',
|
||||
'Start operation for this vm is blocked, clone it if you want to use it.'
|
||||
),
|
||||
vm.update_other_config('xo:backup:sr', srId),
|
||||
vm.update_other_config('xo:backup:sr', srUuid),
|
||||
])
|
||||
|
||||
if (!deleteFirst) {
|
||||
@@ -1724,7 +1765,7 @@ export default class BackupNg {
|
||||
|
||||
if (!isFull) {
|
||||
ignoreErrors.call(
|
||||
snapshot.update_other_config(
|
||||
exported.update_other_config(
|
||||
'xo:backup:deltaChainLength',
|
||||
String(deltaChainLength)
|
||||
)
|
||||
@@ -1734,14 +1775,16 @@ export default class BackupNg {
|
||||
throw new Error(`no exporter for backup mode ${mode}`)
|
||||
}
|
||||
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'set snapshot.other_config[xo:backup:exported]',
|
||||
parentId: taskId,
|
||||
},
|
||||
snapshot.update_other_config('xo:backup:exported', 'true')
|
||||
)
|
||||
if (!isOfflineBackup) {
|
||||
await wrapTask(
|
||||
{
|
||||
logger,
|
||||
message: 'set snapshot.other_config[xo:backup:exported]',
|
||||
parentId: taskId,
|
||||
},
|
||||
exported.update_other_config('xo:backup:exported', 'true')
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async _deleteDeltaVmBackups(
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import deferrable from 'golike-defer'
|
||||
import escapeStringRegexp from 'escape-string-regexp'
|
||||
import execa from 'execa'
|
||||
import splitLines from 'split-lines'
|
||||
import { CancelToken, fromEvent, ignoreErrors } from 'promise-toolbox'
|
||||
@@ -10,7 +9,15 @@ import { createReadStream, readdir, stat } from 'fs'
|
||||
import { satisfies as versionSatisfies } from 'semver'
|
||||
import { utcFormat } from 'd3-time-format'
|
||||
import { basename, dirname } from 'path'
|
||||
import { filter, find, includes, once, range, sortBy, trim } from 'lodash'
|
||||
import {
|
||||
escapeRegExp,
|
||||
filter,
|
||||
find,
|
||||
includes,
|
||||
once,
|
||||
range,
|
||||
sortBy,
|
||||
} from 'lodash'
|
||||
import {
|
||||
chainVhd,
|
||||
createSyntheticStream as createVhdReadStream,
|
||||
@@ -19,6 +26,7 @@ import {
|
||||
|
||||
import createSizeStream from '../size-stream'
|
||||
import xapiObjectToXo from '../xapi-object-to-xo'
|
||||
import { debounceWithKey } from '../_pDebounceWithKey'
|
||||
import { lvs, pvs } from '../lvm'
|
||||
import {
|
||||
forEach,
|
||||
@@ -36,6 +44,7 @@ import {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const DEBOUNCE_DELAY = 10e3
|
||||
const DELTA_BACKUP_EXT = '.json'
|
||||
const DELTA_BACKUP_EXT_LENGTH = DELTA_BACKUP_EXT.length
|
||||
const TAG_SOURCE_VM = 'xo:source_vm'
|
||||
@@ -139,22 +148,20 @@ const listPartitions = (() => {
|
||||
})
|
||||
|
||||
return device =>
|
||||
execa
|
||||
.stdout('partx', [
|
||||
'--bytes',
|
||||
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
||||
'--pairs',
|
||||
device.path,
|
||||
])
|
||||
.then(stdout =>
|
||||
mapFilter(splitLines(stdout), line => {
|
||||
const partition = parseLine(line)
|
||||
const { type } = partition
|
||||
if (type != null && !IGNORED[+type]) {
|
||||
return partition
|
||||
}
|
||||
})
|
||||
)
|
||||
execa('partx', [
|
||||
'--bytes',
|
||||
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
||||
'--pairs',
|
||||
device.path,
|
||||
]).then(({ stdout }) =>
|
||||
mapFilter(splitLines(stdout), line => {
|
||||
const partition = parseLine(line)
|
||||
const { type } = partition
|
||||
if (type != null && !IGNORED[+type]) {
|
||||
return partition
|
||||
}
|
||||
})
|
||||
)
|
||||
})()
|
||||
|
||||
// handle LVM logical volumes automatically
|
||||
@@ -271,8 +278,8 @@ const mountLvmPv = (device, partition) => {
|
||||
}
|
||||
args.push('--show', '-f', device.path)
|
||||
|
||||
return execa.stdout('losetup', args).then(stdout => {
|
||||
const path = trim(stdout)
|
||||
return execa('losetup', args).then(({ stdout }) => {
|
||||
const path = stdout.trim()
|
||||
return {
|
||||
path,
|
||||
unmount: once(() =>
|
||||
@@ -294,6 +301,9 @@ export default class {
|
||||
this._xo = xo
|
||||
}
|
||||
|
||||
@debounceWithKey.decorate(DEBOUNCE_DELAY, function keyFn(remoteId) {
|
||||
return [this, remoteId]
|
||||
})
|
||||
async listRemoteBackups(remoteId) {
|
||||
const handler = await this._xo.getRemoteHandler(remoteId)
|
||||
|
||||
@@ -320,6 +330,9 @@ export default class {
|
||||
return backups
|
||||
}
|
||||
|
||||
@debounceWithKey.decorate(DEBOUNCE_DELAY, function keyFn(remoteId) {
|
||||
return [this, remoteId]
|
||||
})
|
||||
async listVmBackups(remoteId) {
|
||||
const handler = await this._xo.getRemoteHandler(remoteId)
|
||||
|
||||
@@ -862,7 +875,7 @@ export default class {
|
||||
const files = await handler.list('.')
|
||||
|
||||
const reg = new RegExp(
|
||||
'^[^_]+_' + escapeStringRegexp(`${tag}_${vm.name_label}.xva`)
|
||||
'^[^_]+_' + escapeRegExp(`${tag}_${vm.name_label}.xva`)
|
||||
)
|
||||
const backups = sortBy(filter(files, fileName => reg.test(fileName)))
|
||||
|
||||
@@ -887,9 +900,7 @@ export default class {
|
||||
|
||||
xapi._assertHealthyVdiChains(vm)
|
||||
|
||||
const reg = new RegExp(
|
||||
'^rollingSnapshot_[^_]+_' + escapeStringRegexp(tag) + '_'
|
||||
)
|
||||
const reg = new RegExp('^rollingSnapshot_[^_]+_' + escapeRegExp(tag) + '_')
|
||||
const snapshots = sortBy(
|
||||
filter(vm.$snapshots, snapshot => reg.test(snapshot.name_label)),
|
||||
'name_label'
|
||||
@@ -926,9 +937,7 @@ export default class {
|
||||
const transferStart = Date.now()
|
||||
tag = 'DR_' + tag
|
||||
const reg = new RegExp(
|
||||
'^' +
|
||||
escapeStringRegexp(`${vm.name_label}_${tag}_`) +
|
||||
'[0-9]{8}T[0-9]{6}Z$'
|
||||
'^' + escapeRegExp(`${vm.name_label}_${tag}_`) + '[0-9]{8}T[0-9]{6}Z$'
|
||||
)
|
||||
|
||||
const targetXapi = this._xo.getXapi(sr)
|
||||
|
||||
@@ -87,7 +87,7 @@ async function mountLvmPhysicalVolume(devicePath, partition) {
|
||||
args.push('-o', partition.start * 512)
|
||||
}
|
||||
args.push('--show', '-f', devicePath)
|
||||
const path = (await execa.stdout('losetup', args)).trim()
|
||||
const path = (await execa('losetup', args)).stdout.trim()
|
||||
await execa('pvscan', ['--cache', path])
|
||||
|
||||
return {
|
||||
@@ -251,7 +251,7 @@ export default class BackupNgFileRestore {
|
||||
}
|
||||
|
||||
async _listPartitions(devicePath, inspectLvmPv = true) {
|
||||
const stdout = await execa.stdout('partx', [
|
||||
const { stdout } = await execa('partx', [
|
||||
'--bytes',
|
||||
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
||||
'--pairs',
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import { createPredicate } from 'value-matcher'
|
||||
import { timeout } from 'promise-toolbox'
|
||||
import { assign, filter, isEmpty, map, mapValues } from 'lodash'
|
||||
import { filter, isEmpty, map, mapValues } from 'lodash'
|
||||
|
||||
import { crossProduct } from '../../math'
|
||||
import { serializeError, thunkToArray } from '../../utils'
|
||||
@@ -82,7 +82,11 @@ export default async function executeJobCall({
|
||||
params,
|
||||
start: Date.now(),
|
||||
})
|
||||
let promise = app.callApiMethod(session, job.method, assign({}, params))
|
||||
let promise = app.callApiMethod(
|
||||
session,
|
||||
job.method,
|
||||
Object.assign({}, params)
|
||||
)
|
||||
if (job.timeout) {
|
||||
promise = promise::timeout(job.timeout)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import { invalidParameters, noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import * as sensitiveValues from '../sensitive-values'
|
||||
import { PluginsMetadata } from '../models/plugin-metadata'
|
||||
import { isFunction, mapToArray } from '../utils'
|
||||
import { mapToArray } from '../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -65,9 +65,9 @@ export default class {
|
||||
id,
|
||||
instance,
|
||||
name,
|
||||
testable: isFunction(instance.test),
|
||||
testable: typeof instance.test === 'function',
|
||||
testSchema,
|
||||
unloadable: isFunction(instance.unload),
|
||||
unloadable: typeof instance.unload === 'function',
|
||||
version,
|
||||
})
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import synchronized from 'decorator-synchronized'
|
||||
import {
|
||||
assign,
|
||||
every,
|
||||
forEach,
|
||||
isObject,
|
||||
@@ -123,7 +122,7 @@ export default class {
|
||||
}
|
||||
|
||||
async computeVmResourcesUsage(vm) {
|
||||
return assign(
|
||||
return Object.assign(
|
||||
computeVmResourcesUsage(this._xo.getXapi(vm).getObject(vm._xapiId)),
|
||||
await this._xo.computeVmIpPoolsUsage(vm)
|
||||
)
|
||||
|
||||
@@ -77,7 +77,10 @@ export default class Scheduling {
|
||||
'schedules',
|
||||
() => db.get(),
|
||||
schedules =>
|
||||
asyncMap(schedules, schedule => db.update(normalize(schedule))),
|
||||
asyncMap(schedules, async schedule => {
|
||||
await db.update(normalize(schedule))
|
||||
this._start(schedule.id)
|
||||
}),
|
||||
['jobs']
|
||||
)
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ import levelup from 'level-party'
|
||||
import sublevel from 'level-sublevel'
|
||||
import { ensureDir } from 'fs-extra'
|
||||
|
||||
import { forEach, isFunction, promisify } from '../utils'
|
||||
import { forEach, promisify } from '../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -32,7 +32,7 @@ const levelHas = db => {
|
||||
const levelPromise = db => {
|
||||
const dbP = {}
|
||||
forEach(db, (value, name) => {
|
||||
if (!isFunction(value)) {
|
||||
if (typeof value !== 'function') {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -10,13 +10,7 @@ import parseDuration from '../_parseDuration'
|
||||
import Xapi from '../xapi'
|
||||
import xapiObjectToXo from '../xapi-object-to-xo'
|
||||
import XapiStats from '../xapi-stats'
|
||||
import {
|
||||
camelToSnakeCase,
|
||||
forEach,
|
||||
isEmpty,
|
||||
isString,
|
||||
popProperty,
|
||||
} from '../utils'
|
||||
import { camelToSnakeCase, forEach, isEmpty, popProperty } from '../utils'
|
||||
import { Servers } from '../models/server'
|
||||
|
||||
// ===================================================================
|
||||
@@ -461,7 +455,7 @@ export default class {
|
||||
|
||||
// Returns the XAPI connection associated to an object.
|
||||
getXapi(object, type) {
|
||||
if (isString(object)) {
|
||||
if (typeof object === 'string') {
|
||||
object = this._xo.getObject(object, type)
|
||||
}
|
||||
|
||||
|
||||
@@ -9,8 +9,6 @@ import {
|
||||
forEach,
|
||||
includes,
|
||||
isEmpty,
|
||||
isFunction,
|
||||
isString,
|
||||
iteratee,
|
||||
map as mapToArray,
|
||||
stubTrue,
|
||||
@@ -73,7 +71,8 @@ export default class Xo extends EventEmitter {
|
||||
|
||||
if (
|
||||
type != null &&
|
||||
((isString(type) && type !== obj.type) || !includes(type, obj.type)) // Array
|
||||
((typeof type === 'string' && type !== obj.type) ||
|
||||
!includes(type, obj.type)) // Array
|
||||
) {
|
||||
throw noSuchObject(key, type)
|
||||
}
|
||||
@@ -210,7 +209,7 @@ export default class Xo extends EventEmitter {
|
||||
}
|
||||
|
||||
// For security, prevent from accessing `this`.
|
||||
if (isFunction(value)) {
|
||||
if (typeof value === 'function') {
|
||||
value = (value =>
|
||||
function() {
|
||||
return value.apply(thisArg, arguments)
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
"child-process-promise": "^2.0.3",
|
||||
"core-js": "^3.0.0",
|
||||
"pipette": "^0.9.3",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"tmp": "^0.1.0",
|
||||
"vhd-lib": "^0.7.0"
|
||||
},
|
||||
@@ -36,7 +36,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"execa": "^2.0.2",
|
||||
"fs-extra": "^8.0.1",
|
||||
|
||||
@@ -43,6 +43,6 @@ test('VMDKDirectParser reads OK', async () => {
}
expect(harvested.length).toEqual(2)
expect(harvested[0].offsetBytes).toEqual(0)
expect(harvested[0].data.length).toEqual(header['grainSizeSectors'] * 512)
expect(harvested[1].offsetBytes).toEqual(header['grainSizeSectors'] * 512)
expect(harvested[0].data.length).toEqual(header.grainSizeSectors * 512)
expect(harvested[1].offsetBytes).toEqual(header.grainSizeSectors * 512)
})

Some files were not shown because too many files have changed in this diff