Compare commits

123 commits: hub-xva-de...correct-ne
| Author | SHA1 | Date |
|---|---|---|
| | 301f2f5d72 | |
| | a3be8dc6fa | |
| | 06f596adc6 | |
| | 1f3b54e0c4 | |
| | 2ddfbe8566 | |
| | c61a118e4f | |
| | d69e61a634 | |
| | 14f0cbaec6 | |
| | b313eb14ee | |
| | 7b47e40244 | |
| | b52204817d | |
| | 377552103e | |
| | 688b65ccde | |
| | 6cb4faf33d | |
| | 78b83bb901 | |
| | 9ff6f60b66 | |
| | 624e10ed15 | |
| | 19e10bbb53 | |
| | cca945e05b | |
| | 21901f2a75 | |
| | ef7f943eee | |
| | ec1062f9f2 | |
| | 2f67ed3138 | |
| | ce912db30e | |
| | 41d790f346 | |
| | bf426e15ec | |
| | e4403baeb9 | |
| | 61101b00a1 | |
| | 69f8ffcfeb | |
| | 6b8042291c | |
| | ffc0c83b50 | |
| | 8ccd4c269a | |
| | 934ec86f93 | |
| | 23be38b5fa | |
| | fe7f74e46b | |
| | a3fd78f8e2 | |
| | 137bad6f7b | |
| | 17df6fc764 | |
| | 2e51c8a124 | |
| | 5588a46366 | |
| | a8122f9add | |
| | 5568be91d2 | |
| | a04bd6f93c | |
| | 56d63b10e4 | |
| | 2c97643b10 | |
| | 679f403648 | |
| | d482c707f6 | |
| | 2ec9641783 | |
| | dab1788a3b | |
| | 47bb79cce1 | |
| | 41dbc20be9 | |
| | 10a631ec96 | |
| | 830e5aed96 | |
| | 7db573885b | |
| | a74d56ebc6 | |
| | ff7d84297e | |
| | 3a76509fe9 | |
| | ac4de9ab0f | |
| | 471f397418 | |
| | 73bbdf6d4e | |
| | 7f26aea585 | |
| | 1c767b709f | |
| | 0ced82c885 | |
| | 21dd195b0d | |
| | 6aa6cfba8e | |
| | fd7d52d38b | |
| | a47bb14364 | |
| | d6e6fa5735 | |
| | 46da11a52e | |
| | 68e3dc21e4 | |
| | 7232cc45b4 | |
| | be5a297248 | |
| | 257031b1bc | |
| | c9db9fa17a | |
| | 13f961a422 | |
| | 3b38e0c4e1 | |
| | 07526efe61 | |
| | 8753c02adb | |
| | 6a0bbfa447 | |
| | 21faaeb33d | |
| | 0525fc5909 | |
| | a1a53bb285 | |
| | 0c453c4415 | |
| | d0406f9736 | |
| | ba74b8603d | |
| | c675a4d61d | |
| | 965c45bc70 | |
| | 139a22602a | |
| | e0e4969198 | |
| | 08d69d95b3 | |
| | 4e6c507ba9 | |
| | fd06374365 | |
| | a07ebc636a | |
| | 4c151ac9aa | |
| | 05c425698f | |
| | 2a961979e6 | |
| | 211ede92cc | |
| | 256af03772 | |
| | 654fd5a4f9 | |
| | 541d90e49f | |
| | 974e7038e7 | |
| | e2f5b30aa9 | |
| | 3483e7d9e0 | |
| | 56cb20a1af | |
| | 64929653dd | |
| | c955da9bc6 | |
| | 291354fa8e | |
| | 905d736512 | |
| | 3406d6e2a9 | |
| | fc10b5ffb9 | |
| | f89c313166 | |
| | 7c734168d0 | |
| | 1e7bfec2ce | |
| | 1eb0603b4e | |
| | 4b32730ce8 | |
| | ad083c1d9b | |
| | b4f84c2de2 | |
| | fc17443ce4 | |
| | 342ae06b21 | |
| | 093fb7f959 | |
| | f6472424ad | |
| | 31ed3767c6 | |
| | 366acb65ea | |
@@ -21,7 +21,7 @@ module.exports = {
 
   overrides: [
     {
-      files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
+      files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
       rules: {
        'no-console': 'off',
      },

@@ -40,6 +40,13 @@ module.exports = {
 
    'react/jsx-handler-names': 'off',
 
+    // disabled because not always relevant, we might reconsider in the future
+    //
+    // enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
+    //
+    // example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
+    'lines-between-class-members': 'off',
+
    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',
@@ -36,7 +36,7 @@
     "@babel/preset-env": "^7.0.0",
     "@babel/preset-flow": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {

@@ -8,5 +8,8 @@
     "directory": "@xen-orchestra/babel-config",
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
+  "engines": {
+    "node": ">=6"
+  }
 }
@xen-orchestra/backups-cli/_composeCommands.js (new file, 32 lines)

@@ -0,0 +1,32 @@
const getopts = require('getopts')

const { version } = require('./package.json')

module.exports = commands =>
  async function(args, prefix) {
    const opts = getopts(args, {
      alias: {
        help: 'h',
      },
      boolean: ['help'],
      stopEarly: true,
    })

    const commandName = opts.help || args.length === 0 ? 'help' : args[0]
    const command = commands[commandName]
    if (command === undefined) {
      process.stdout.write(`Usage:

${Object.keys(commands)
  .filter(command => command !== 'help')
  .map(command => `    ${prefix} ${command} ${commands[command].usage || ''}`)
  .join('\n\n')}

xo-backups v${version}
`)
      process.exitCode = commandName === 'help' ? 0 : 1
      return
    }

    return command.main(args.slice(1), prefix + ' ' + commandName)
  }
@xen-orchestra/backups-cli/commands/clean-vms.js (new file, 393 lines)

@@ -0,0 +1,393 @@
#!/usr/bin/env node

// assigned when options are parsed by the main function
let force

// -----------------------------------------------------------------------------

const assert = require('assert')
const getopts = require('getopts')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')

const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })

// -----------------------------------------------------------------------------

const asyncMap = curryRight((iterable, fn) =>
  Promise.all(
    Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
  )
)

const filter = (...args) => thisArg => thisArg.filter(...args)

const isGzipFile = async fd => {
  // https://tools.ietf.org/html/rfc1952.html#page-5
  const magicNumber = Buffer.allocUnsafe(2)
  assert.strictEqual(
    await fs.read(fd, magicNumber, 0, magicNumber.length, 0),
    magicNumber.length
  )
  return magicNumber[0] === 31 && magicNumber[1] === 139
}

// TODO: better check?
//
// our heuristic is not good enough, there has been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either THOUGH THEY MAY HAVE BEEN COMPRESSED FILES:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async (size, fd) => {
  if (size <= 1024 || size % 512 !== 0) {
    return false
  }

  const buf = Buffer.allocUnsafe(1024)
  assert.strictEqual(
    await fs.read(fd, buf, 0, buf.length, size - buf.length),
    buf.length
  )
  return buf.every(_ => _ === 0)
}

// TODO: find an heuristic for compressed files
const isValidXva = async path => {
  try {
    const fd = await fs.open(path, 'r')
    try {
      const { size } = await fs.fstat(fd)
      if (size < 20) {
        // neither a valid gzip not tar
        return false
      }

      return (await isGzipFile(fd))
        ? true // gzip files cannot be validated at this time
        : await isValidTar(size, fd)
    } finally {
      fs.close(fd).catch(noop)
    }
  } catch (error) {
    // never throw, log and report as valid to avoid side effects
    console.error('isValidXva', path, error)
    return true
  }
}

const noop = Function.prototype

const readDir = path =>
  fs.readdir(path).then(
    entries => {
      entries.forEach((entry, i) => {
        entries[i] = `${path}/${entry}`
      })

      return entries
    },
    error => {
      // a missing dir is by definition empty
      if (error != null && error.code === 'ENOENT') {
        return []
      }
      throw error
    }
  )

// -----------------------------------------------------------------------------

// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
async function mergeVhdChain(chain) {
  assert(chain.length >= 2)

  const child = chain[0]
  const parent = chain[chain.length - 1]
  const children = chain.slice(0, -1).reverse()

  console.warn('Unused parents of VHD', child)
  chain
    .slice(1)
    .reverse()
    .forEach(parent => {
      console.warn(' ', parent)
    })
  force && console.warn('  merging…')
  console.warn('')
  if (force) {
    // `mergeVhd` does not work with a stream, either
    // - make it accept a stream
    // - or create synthetic VHD which is not a stream
    return console.warn('TODO: implement merge')
    // await mergeVhd(
    //   handler,
    //   parent,
    //   handler,
    //   children.length === 1
    //     ? child
    //     : await createSyntheticStream(handler, children)
    // )
  }

  await Promise.all([
    force && fs.rename(parent, child),
    asyncMap(children.slice(0, -1), child => {
      console.warn('Unused VHD', child)
      force && console.warn('  deleting…')
      console.warn('')
      return force && handler.unlink(child)
    }),
  ])
}

const listVhds = pipe([
  vmDir => vmDir + '/vdis',
  readDir,
  asyncMap(readDir),
  flatten,
  asyncMap(readDir),
  flatten,
  filter(_ => _.endsWith('.vhd')),
])

async function handleVm(vmDir) {
  const vhds = new Set()
  const vhdParents = { __proto__: null }
  const vhdChildren = { __proto__: null }

  // remove broken VHDs
  await asyncMap(await listVhds(vmDir), async path => {
    try {
      const vhd = new Vhd(handler, path)
      await vhd.readHeaderAndFooter()
      vhds.add(path)
      if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
        const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
        vhdParents[path] = parent
        if (parent in vhdChildren) {
          const error = new Error(
            'this script does not support multiple VHD children'
          )
          error.parent = parent
          error.child1 = vhdChildren[parent]
          error.child2 = path
          throw error // should we throw?
        }
        vhdChildren[parent] = path
      }
    } catch (error) {
      console.warn('Error while checking VHD', path)
      console.warn(' ', error)
      if (error != null && error.code === 'ERR_ASSERTION') {
        force && console.warn('  deleting…')
        console.warn('')
        force && (await handler.unlink(path))
      }
    }
  })

  // remove VHDs with missing ancestors
  {
    const deletions = []

    // return true if the VHD has been deleted or is missing
    const deleteIfOrphan = vhd => {
      const parent = vhdParents[vhd]
      if (parent === undefined) {
        return
      }

      // no longer needs to be checked
      delete vhdParents[vhd]

      deleteIfOrphan(parent)

      if (!vhds.has(parent)) {
        vhds.delete(vhd)

        console.warn('Error while checking VHD', vhd)
        console.warn('  missing parent', parent)
        force && console.warn('  deleting…')
        console.warn('')
        force && deletions.push(handler.unlink(vhd))
      }
    }

    // > A property that is deleted before it has been visited will not be
    // > visited later.
    // >
    // > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
    for (const child in vhdParents) {
      deleteIfOrphan(child)
    }

    await Promise.all(deletions)
  }

  const [jsons, xvas] = await readDir(vmDir).then(entries => [
    entries.filter(_ => _.endsWith('.json')),
    new Set(entries.filter(_ => _.endsWith('.xva'))),
  ])

  await asyncMap(xvas, async path => {
    // check is not good enough to delete the file, the best we can do is report
    // it
    if (!(await isValidXva(path))) {
      console.warn('Potential broken XVA', path)
      console.warn('')
    }
  })

  const unusedVhds = new Set(vhds)
  const unusedXvas = new Set(xvas)

  // compile the list of unused XVAs and VHDs, and remove backup metadata which
  // reference a missing XVA/VHD
  await asyncMap(jsons, async json => {
    const metadata = JSON.parse(await fs.readFile(json))
    const { mode } = metadata
    if (mode === 'full') {
      const linkedXva = resolve(vmDir, metadata.xva)

      if (xvas.has(linkedXva)) {
        unusedXvas.delete(linkedXva)
      } else {
        console.warn('Error while checking backup', json)
        console.warn('  missing file', linkedXva)
        force && console.warn('  deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    } else if (mode === 'delta') {
      const linkedVhds = (() => {
        const { vhds } = metadata
        return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
      })()

      // FIXME: find better approach by keeping as much of the backup as
      // possible (existing disks) even if one disk is missing
      if (linkedVhds.every(_ => vhds.has(_))) {
        linkedVhds.forEach(_ => unusedVhds.delete(_))
      } else {
        console.warn('Error while checking backup', json)
        const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
        console.warn(
          '  %i/%i missing VHDs',
          missingVhds.length,
          linkedVhds.length
        )
        missingVhds.forEach(vhd => {
          console.warn(' ', vhd)
        })
        force && console.warn('  deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    }
  })

  // TODO: parallelize by vm/job/vdi
  const unusedVhdsDeletion = []
  {
    // VHD chains (as list from child to ancestor) to merge indexed by last
    // ancestor
    const vhdChainsToMerge = { __proto__: null }

    const toCheck = new Set(unusedVhds)

    const getUsedChildChainOrDelete = vhd => {
      if (vhd in vhdChainsToMerge) {
        const chain = vhdChainsToMerge[vhd]
        delete vhdChainsToMerge[vhd]
        return chain
      }

      if (!unusedVhds.has(vhd)) {
        return [vhd]
      }

      // no longer needs to be checked
      toCheck.delete(vhd)

      const child = vhdChildren[vhd]
      if (child !== undefined) {
        const chain = getUsedChildChainOrDelete(child)
        if (chain !== undefined) {
          chain.push(vhd)
          return chain
        }
      }

      console.warn('Unused VHD', vhd)
      force && console.warn('  deleting…')
      console.warn('')
      force && unusedVhdsDeletion.push(handler.unlink(vhd))
    }

    toCheck.forEach(vhd => {
      vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
    })

    Object.keys(vhdChainsToMerge).forEach(key => {
      const chain = vhdChainsToMerge[key]
      if (chain !== undefined) {
        unusedVhdsDeletion.push(mergeVhdChain(chain))
      }
    })
  }

  await Promise.all([
    unusedVhdsDeletion,
    asyncMap(unusedXvas, path => {
      console.warn('Unused XVA', path)
      force && console.warn('  deleting…')
      console.warn('')
      return force && handler.unlink(path)
    }),
  ])
}

// -----------------------------------------------------------------------------

module.exports = async function main(args) {
  const opts = getopts(args, {
    alias: {
      force: 'f',
    },
    boolean: ['force'],
    default: {
      force: false,
    },
  })

  ;({ force } = opts)
  await asyncMap(opts._, async vmDir => {
    vmDir = resolve(vmDir)

    // TODO: implement this in `xo-server`, not easy because not compatible with
    // `@xen-orchestra/fs`.
    const release = await lockfile.lock(vmDir)
    try {
      await handleVm(vmDir)
    } catch (error) {
      console.error('handleVm', vmDir, error)
    } finally {
      await release()
    }
  })
}
@xen-orchestra/backups-cli/index.js (new executable file, 13 lines)

@@ -0,0 +1,13 @@
#!/usr/bin/env node

require('./_composeCommands')({
  'clean-vms': {
    get main() {
      return require('./commands/clean-vms')
    },
    usage: '[--force] xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(error => {
  console.error('main', error)
  process.exitCode = 1
})
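Given this command map, invoking `xo-backups` with no arguments (or with `--help`) makes `_composeCommands.js` above print a usage message. Reconstructed from its template (exact whitespace approximate), it looks like:

```
Usage:

    xo-backups clean-vms [--force] xo-vm-backups/*

xo-backups v0.0.0
```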
@xen-orchestra/backups-cli/package.json (new file, 28 lines)

@@ -0,0 +1,28 @@
{
  "bin": {
    "xo-backups": "index.js"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/fs": "^0.10.2",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.14.0",
    "proper-lockfile": "^4.1.1",
    "vhd-lib": "^0.7.2"
  },
  "engines": {
    "node": ">=7.10.1"
  },
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
  "name": "@xen-orchestra/backups-cli",
  "repository": {
    "directory": "@xen-orchestra/backups-cli",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.0.0"
}
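A quick invocation sketch (the remote path is illustrative). Without `--force` the command only reports broken VHDs/XVAs and unused files; with it, they are deleted (the actual VHD chain merge is still a TODO in this version, see `mergeVhdChain` above):

```
# report-only pass
$ xo-backups clean-vms /srv/remotes/nfs/xo-vm-backups/*

# delete unused and broken files
$ xo-backups clean-vms --force /srv/remotes/nfs/xo-vm-backups/*
```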
@@ -16,7 +16,7 @@
   },
   "dependencies": {
     "golike-defer": "^0.4.1",
-    "xen-api": "^0.27.2"
+    "xen-api": "^0.27.3"
   },
   "scripts": {
     "postversion": "npm publish"
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/cron",
-  "version": "1.0.4",
+  "version": "1.0.6",
   "license": "ISC",
   "description": "Focused, well maintained, cron parser/scheduler",
   "keywords": [

@@ -46,7 +46,7 @@
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "@babel/preset-flow": "^7.0.0",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {
@@ -5,14 +5,21 @@ import parse from './parse'
 
 const MAX_DELAY = 2 ** 31 - 1
 
-function nextDelay(schedule) {
-  const now = schedule._createDate()
-  return next(schedule._schedule, now) - now
-}
-
 class Job {
   constructor(schedule, fn) {
+    let scheduledDate
     const wrapper = () => {
+      const now = Date.now()
+      if (scheduledDate > now) {
+        // we're early, delay
+        //
+        // no need to check _isEnabled, we're just delaying the existing timeout
+        //
+        // see https://github.com/vatesfr/xen-orchestra/issues/4625
+        this._timeout = setTimeout(wrapper, scheduledDate - now)
+        return
+      }
+
       this._isRunning = true
 
       let result

@@ -32,7 +39,9 @@
       this._isRunning = false
 
       if (this._isEnabled) {
-        const delay = nextDelay(schedule)
+        const now = schedule._createDate()
+        scheduledDate = +next(schedule._schedule, now)
+        const delay = scheduledDate - now
         this._timeout =
           delay < MAX_DELAY
             ? setTimeout(wrapper, delay)
@@ -2,12 +2,24 @@
 
 import { createSchedule } from './'
 
+const wrap = value => () => value
+
 describe('issues', () => {
+  let originalDateNow
+  beforeAll(() => {
+    originalDateNow = Date.now
+  })
+  afterAll(() => {
+    Date.now = originalDateNow
+    originalDateNow = undefined
+  })
+
   test('stop during async execution', async () => {
     let nCalls = 0
     let resolve, promise
 
-    const job = createSchedule('* * * * *').createJob(() => {
+    const schedule = createSchedule('* * * * *')
+    const job = schedule.createJob(() => {
       ++nCalls
 
       // eslint-disable-next-line promise/param-names

@@ -18,6 +30,7 @@ describe('issues', () => {
     })
 
     job.start()
+    Date.now = wrap(+schedule.next(1)[0])
     jest.runAllTimers()
 
     expect(nCalls).toBe(1)

@@ -35,7 +48,8 @@ describe('issues', () => {
     let nCalls = 0
     let resolve, promise
 
-    const job = createSchedule('* * * * *').createJob(() => {
+    const schedule = createSchedule('* * * * *')
+    const job = schedule.createJob(() => {
       ++nCalls
 
       // eslint-disable-next-line promise/param-names

@@ -46,6 +60,7 @@ describe('issues', () => {
     })
 
     job.start()
+    Date.now = wrap(+schedule.next(1)[0])
     jest.runAllTimers()
 
     expect(nCalls).toBe(1)

@@ -56,6 +71,7 @@ describe('issues', () => {
     resolve()
     await promise
 
+    Date.now = wrap(+schedule.next(1)[0])
    jest.runAllTimers()
    expect(nCalls).toBe(2)
  })
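For reference, a minimal sketch of the `@xen-orchestra/cron` API these tests exercise (the cron pattern and the logged message are illustrative):

```js
import { createSchedule } from '@xen-orchestra/cron'

const schedule = createSchedule('* * * * *')

// schedule.next(n) returns the next n scheduled dates, as used by the tests above
console.log(schedule.next(1))

// the job callback runs at most once per scheduled date, even if the timer
// fires early (the issue #4625 fix in job.js above)
const job = schedule.createJob(() => {
  console.log('tick')
})
job.start()
```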
@@ -1,13 +1,13 @@
-# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
+# @xen-orchestra/defined [](https://travis-ci.org/${pkg.shortGitHubPath})
 
 > ${pkg.description}
 
 ## Install
 
-Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
+Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/defined):
 
 ```
-> npm install --save ${pkg.name}
+> npm install --save @xen-orchestra/defined
 ```
 
 ## Usage

@@ -40,10 +40,10 @@ the code.
 
 You may:
 
-- report any [issue](${pkg.bugs})
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
   you've encountered;
 - fork and create a pull request.
 
 ## License
 
-${pkg.license} © [${pkg.author.name}](${pkg.author.url})
+ISC © [Vates SAS](https://vates.fr)
@@ -34,7 +34,7 @@
     "@babel/preset-env": "^7.0.0",
     "@babel/preset-flow": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {
@@ -62,10 +62,10 @@ the code.
 
 You may:
 
-- report any [issue](${pkg.bugs})
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
   you've encountered;
 - fork and create a pull request.
 
 ## License
 
-${pkg.license} © [${pkg.author.name}](${pkg.author.url})
+ISC © [Vates SAS](https://vates.fr)
@@ -33,7 +33,7 @@
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/fs",
-  "version": "0.10.1",
+  "version": "0.10.2",
   "license": "AGPL-3.0",
   "description": "The File System for Xen Orchestra backups.",
   "keywords": [],

@@ -18,19 +18,19 @@
     "dist/"
   ],
   "engines": {
-    "node": ">=6"
+    "node": ">=8.10"
   },
   "dependencies": {
     "@marsaud/smb2": "^0.14.0",
-    "@sindresorhus/df": "^2.1.0",
+    "@sindresorhus/df": "^3.1.1",
     "@xen-orchestra/async-map": "^0.0.0",
     "decorator-synchronized": "^0.5.0",
-    "execa": "^1.0.0",
+    "execa": "^3.2.0",
     "fs-extra": "^8.0.1",
-    "get-stream": "^4.0.0",
+    "get-stream": "^5.1.0",
     "limit-concurrency-decorator": "^0.4.0",
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.13.0",
+    "promise-toolbox": "^0.14.0",
     "readable-stream": "^3.0.6",
     "through2": "^3.0.0",
     "tmp": "^0.1.0",

@@ -46,7 +46,7 @@
     "@babel/preset-flow": "^7.0.0",
     "async-iterator-to-stream": "^1.1.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "dotenv": "^8.0.0",
     "index-modules": "^0.3.0",
     "rimraf": "^3.0.0"
@@ -389,7 +389,7 @@ export default class RemoteHandlerAbstract {
   async test(): Promise<Object> {
     const SIZE = 1024 * 1024 * 10
     const testFileName = normalizePath(`${Date.now()}.test`)
-    const data = await fromCallback(cb => randomBytes(SIZE, cb))
+    const data = await fromCallback(randomBytes, SIZE)
     let step = 'write'
     try {
       const writeStart = process.hrtime()
@@ -86,7 +86,7 @@ handlers.forEach(url => {
   describe('#createOutputStream()', () => {
     it('creates parent dir if missing', async () => {
       const stream = await handler.createOutputStream('dir/file')
-      await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
+      await fromCallback(pipeline, createTestDataStream(), stream)
       await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
     })
   })

@@ -106,7 +106,7 @@ handlers.forEach(url => {
   describe('#createWriteStream()', () => {
     testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
       const stream = await handler.createWriteStream(file, { flags })
-      await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
+      await fromCallback(pipeline, createTestDataStream(), stream)
       await expect(await handler.readFile('file')).toEqual(TEST_DATA)
     })
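These rewrites match the `fromCallback(fn, ...args)` form that goes with the `promise-toolbox` v0.14 bump elsewhere in this compare: the function and its arguments are passed directly instead of wrapped in a closure. A minimal sketch (stream names are illustrative):

```js
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

async function copy(source, sink) {
  // before: await fromCallback(cb => pipeline(source, sink, cb))
  await fromCallback(pipeline, source, sink)
}
```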
@@ -47,8 +47,19 @@ export default class LocalHandler extends RemoteHandlerAbstract {
     })
   }
 
-  _getInfo() {
-    return df.file(this._getFilePath('/'))
+  async _getInfo() {
+    // df.file() resolves with an object with the following properties:
+    // filesystem, type, size, used, available, capacity and mountpoint.
+    // size, used, available and capacity may be `NaN` so we remove any `NaN`
+    // value from the object.
+    const info = await df.file(this._getFilePath('/'))
+    Object.keys(info).forEach(key => {
+      if (Number.isNaN(info[key])) {
+        delete info[key]
+      }
+    })
+
+    return info
   }
 
   async _getSize(file) {
@@ -15,7 +15,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/log):
 Everywhere something should be logged:
 
 ```js
-import createLogger from '@xen-orchestra/log'
+import { createLogger } from '@xen-orchestra/log'
 
 const log = createLogger('my-module')
 

@@ -42,6 +42,7 @@ log.error('could not join server', {
 Then, at application level, configure the logs are handled:
 
 ```js
+import { createLogger } from '@xen-orchestra/log'
 import { configure, catchGlobalErrors } from '@xen-orchestra/log/configure'
 import transportConsole from '@xen-orchestra/log/transports/console'
 import transportEmail from '@xen-orchestra/log/transports/email'

@@ -77,8 +78,8 @@ configure([
 ])
 
 // send all global errors (uncaught exceptions, warnings, unhandled rejections)
-// to this transport
-catchGlobalErrors(transport)
+// to this logger
+catchGlobalErrors(createLogger('app'))
 ```
 
 ### Transports
@@ -31,14 +31,14 @@
   },
   "dependencies": {
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.13.0"
+    "promise-toolbox": "^0.14.0"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "index-modules": "^0.3.0",
     "rimraf": "^3.0.0"
   },

@@ -48,7 +48,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "yarn run clean",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build",
+    "prepare": "yarn run build",
     "postversion": "npm publish"
   }
 }
@@ -1,13 +1,13 @@
-# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
+# @xen-orchestra/mixin [](https://travis-ci.org/${pkg.shortGitHubPath})
 
 > ${pkg.description}
 
 ## Install
 
-Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
+Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/mixin):
 
 ```
-> npm install --save ${pkg.name}
+> npm install --save @xen-orchestra/mixin
 ```
 
 ## Usage

@@ -40,10 +40,10 @@ the code.
 
 You may:
 
-- report any [issue](${pkg.bugs})
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
   you've encountered;
 - fork and create a pull request.
 
 ## License
 
-${pkg.license} © [${pkg.author.name}](${pkg.author.url})
+ISC © [Vates SAS](https://vates.fr)
@@ -36,7 +36,7 @@
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-dev": "^1.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {

@@ -28,7 +28,7 @@
     "@babel/cli": "^7.0.0",
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {
CHANGELOG.md (114 lines changed)

@@ -4,18 +4,124 @@

### Enhancements

- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
- [SAML] Setting to disable requested authentication context (helps with _Active Directory_) (PR [#4675](https://github.com/vatesfr/xen-orchestra/pull/4675))
- The default sign-in page can be configured via `authentication.defaultSignInPage` (PR [#4678](https://github.com/vatesfr/xen-orchestra/pull/4678))
- [SR] Allow import of VHD and VMDK disks [#4137](https://github.com/vatesfr/xen-orchestra/issues/4137) (PR [#4138](https://github.com/vatesfr/xen-orchestra/pull/4138))
- [Host] Advanced Live Telemetry (PR [#4680](https://github.com/vatesfr/xen-orchestra/pull/4680))

### Bug fixes

- [Metadata backup] Add 10 minutes timeout to avoid stuck jobs [#4657](https://github.com/vatesfr/xen-orchestra/issues/4657) (PR [#4666](https://github.com/vatesfr/xen-orchestra/pull/4666))
- [Metadata backups] Fix out-of-date listing for 1 minute due to cache (PR [#4672](https://github.com/vatesfr/xen-orchestra/pull/4672))
- [Delta backup] Limit the number of merged deltas per run to avoid interrupted jobs (PR [#4674](https://github.com/vatesfr/xen-orchestra/pull/4674))

### Released packages

- xo-server v5.51.0
- vhd-lib v0.7.2
- xo-vmdk-to-vhd v0.1.8
- xo-server-auth-ldap v0.6.6
- xo-server-auth-saml v0.7.0
- xo-server-backup-reports v0.16.4
- @xen-orchestra/fs v0.10.2
- xo-server v5.53.0
- xo-web v5.53.1

## **5.40.2** (2019-11-22)

### Enhancements

- [Logs] Ability to report a bug with attached log (PR [#4201](https://github.com/vatesfr/xen-orchestra/pull/4201))
- [Backup] Reduce _VDI chain protection error_ occurrence by being more tolerant (configurable via `xo-server`'s `xapiOptions.maxUncoalescedVdis` setting) [#4124](https://github.com/vatesfr/xen-orchestra/issues/4124) (PR [#4651](https://github.com/vatesfr/xen-orchestra/pull/4651))
- [Plugin] [Web hooks](https://xen-orchestra.com/docs/web-hooks.html) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
- [Tables] Always put the tables' search in the URL [#4542](https://github.com/vatesfr/xen-orchestra/issues/4542) (PR [#4637](https://github.com/vatesfr/xen-orchestra/pull/4637))

### Bug fixes

- [SDN controller] Prevent private network creation on bond slave PIF (Fixes https://github.com/xcp-ng/xcp/issues/300) (PR [4633](https://github.com/vatesfr/xen-orchestra/pull/4633))
- [Metadata backup] Fix failed backup reported as successful [#4596](https://github.com/vatesfr/xen-orchestra/issues/4596) (PR [#4598](https://github.com/vatesfr/xen-orchestra/pull/4598))
- [Backup NG] Fix "task cancelled" error when the backup job timeout exceeds 596 hours [#4662](https://github.com/vatesfr/xen-orchestra/issues/4662) (PR [#4663](https://github.com/vatesfr/xen-orchestra/pull/4663))
- Fix `promise rejected with non-error` warnings in logs (PR [#4659](https://github.com/vatesfr/xen-orchestra/pull/4659))

### Released packages

- xo-server-web-hooks v0.1.0
- xen-api v0.27.3
- xo-server-backup-reports v0.16.3
- vhd-lib v0.7.1
- xo-server v5.52.1
- xo-web v5.52.0

## **5.40.1** (2019-10-29)

### Bug fixes

- [XOSAN] Fix "Install Cloud plugin" warning (PR [#4631](https://github.com/vatesfr/xen-orchestra/pull/4631))

### Released packages

- xo-web v5.51.1

## **5.40.0** (2019-10-29)

### Breaking changes

- `xo-server` requires Node 8.

### Highlights

- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))

### Enhancements

- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))

### Bug fixes

- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))

### Released packages

- @xen-orchestra/cron v1.0.6
- xo-server-transport-icinga2 v0.1.0
- xo-server-sdn-controller v0.3.1
- xo-server v5.51.1
- xo-web v5.51.0

### Dropped packages

- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment

## **5.39.1** (2019-10-11)

### Enhancements

- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))

### Bug fixes

- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))

### Released packages

- xo-web v5.50.3

## **5.39.0** (2019-09-30)

### Highlights

- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))

@@ -69,8 +175,6 @@

## **5.38.0** (2019-08-29)

### Enhancements

- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
@@ -11,7 +11,8 @@
 
 > Users must be able to say: “I had this issue, happy to know it's fixed”
 
-- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
+- [Host] Fix Enable Live Telemetry button state (PR [#4686](https://github.com/vatesfr/xen-orchestra/pull/4686))
+- [Host] Fix Advanced Live Telemetry URL (PR [#4687](https://github.com/vatesfr/xen-orchestra/pull/4687))
 
 ### Released packages

@@ -20,5 +21,5 @@
 >
 > Rule of thumb: add packages on top.
 
-- xo-server v5.51.0
-- xo-web v5.51.0
+- xo-server v5.54.0
+- xo-web v5.54.0
@@ -51,6 +51,7 @@
 * [Health](health.md)
 * [Job manager](scheduler.md)
 * [Alerts](alerts.md)
+* [Web hooks](web-hooks.md)
 * [Load balancing](load_balancing.md)
 * [Emergency Shutdown](emergency_shutdown.md)
 * [Auto scalability](auto_scalability.md)
docs/assets/release-channels.png (new binary file, 99 KiB; binary content not shown)
@@ -22,7 +22,7 @@ group = 'nogroup'
 By default, XO-server listens on all addresses (0.0.0.0) and runs on port 80. If you need to, you can change this in the `# Basic HTTP` section:
 
 ```toml
-host = '0.0.0.0'
+hostname = '0.0.0.0'
 port = 80
 ```

@@ -31,7 +31,7 @@ port = 80
 XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:
 
 ```toml
-host = '0.0.0.0'
+hostname = '0.0.0.0'
 port = 443
 certificate = './certificate.pem'
 key = './key.pem'

@@ -43,10 +43,10 @@ key = './key.pem'
 
 If you want to redirect everything to HTTPS, you can modify the configuration like this:
 
-```
+```toml
 # If set to true, all HTTP traffic will be redirected to the first HTTPs configuration.
 
-redirectToHttps: true
+redirectToHttps = true
 ```
 
 This should be written just before the `mount` option, inside the `http:` block.
@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
 
 ```
 $ node -v
-v8.12.0
+v8.16.2
 ```
 
 If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.

@@ -65,17 +65,13 @@ Now you have to create a config file for `xo-server`:
 
 ```
 $ cd packages/xo-server
-$ cp sample.config.toml .xo-server.toml
+$ mkdir -p ~/.config/xo-server
+$ cp sample.config.toml ~/.config/xo-server/config.toml
 ```
 
-Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory):
+> Note: If you're installing `xo-server` as a global service, you may want to copy the file to `/etc/xo-server/config.toml` instead.
 
-```toml
-[http.mounts]
-'/' = '../xo-web/dist/'
-```
 
-In this config file, you can also change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
+In this config file, you can change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
 
 You can try to start xo-server to see if it works. You should have something like this:

@@ -186,7 +182,7 @@ service redis start
 
 ## SUDO
 
-If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server/.xo-server.toml` and setting `useSudo = true`. It's near the end of the file:
+If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server` configuration file and setting `useSudo = true`. It's near the end of the file:
 
 ```
 useSudo = true
@@ -41,6 +41,20 @@ However, if you want to start a manual check, you can do it by clicking on the "
 
+#### Release channel
+
+In Xen Orchestra, you can make a choice between two different release channels.
+
+##### Stable
+
+The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).
+
+##### Latest
+
+The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once deployed in vastly varying environments, such as our user base has.
+
+> To select the release channel of your choice, go to the XOA > Updates view.
+
 #### Upgrade
 
 If a new version is found, you'll have an upgrade button and its tooltip displayed:
docs/web-hooks.md (new file, 72 lines)

@@ -0,0 +1,72 @@
# Web hooks

⚠ This feature is experimental!

## Configuration

The plugin "web-hooks" needs to be installed and loaded for this feature to work.

You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called.

* Go to Settings > Plugins > Web hooks
* Add new hooks
* For each hook, configure:
  * Method: the XO API method that will trigger the HTTP request when called
  * Type:
    * pre: the request will be sent when the method is called
    * post: the request will be sent after the method action is completed
    * pre/post: both
  * URL: the full URL which the requests will be sent to
* Save the plugin configuration

From now on, a request will be sent to the corresponding URLs when a configured method is called by an XO client.

## Request content

```
POST / HTTP/1.1
Content-Type: application/json
```

The request's body is a JSON string representing an object with the following properties:

- `type`: `"pre"` or `"post"`
- `callId`: unique ID for this call to help match a pre-call and a post-call
- `userId`: unique internal ID of the user who performed the call
- `userName`: login/e-mail address of the user who performed the call
- `method`: name of the method that was called (e.g. `"vm.start"`)
- `params`: call parameters (object)
- `timestamp`: epoch timestamp of the beginning ("pre") or end ("post") of the call in ms
- `duration`: duration of the call in ms ("post" hooks only)
- `result`: call result on success ("post" hooks only)
- `error`: call result on error ("post" hooks only)

## Request handling

*Quick Node.js example of how you may want to handle the requests*

```js
const http = require('http')
const { exec } = require('child_process')

http
  .createServer((req, res) => {
    let body = ''
    req.on('data', chunk => {
      body += chunk
    })
    req.on('end', () => handleHook(body))
    res.end()
  })
  .listen(3000)

const handleHook = data => {
  const { method, params, type, result, error, timestamp } = JSON.parse(data)

  // Log it
  console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${result || error}`)

  // Run scripts
  exec(`./hook-scripts/${method}-${type}.sh`)
}
```
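For illustration, a "post" body for `vm.start` consistent with the properties documented above (all values are invented for the example):

```
{
  "type": "post",
  "callId": "1",
  "userId": "<internal user id>",
  "userName": "admin@admin.net",
  "method": "vm.start",
  "params": { "id": "<vm id>" },
  "timestamp": 1573558418000,
  "duration": 312,
  "result": true
}
```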
docs/xoa.md (54 lines changed)

@@ -22,9 +22,9 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the RAM.
 
 ### The quickest way
 
-The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.
+The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go to https://xen-orchestra.com/#!/xoa and follow the instructions.
 
-> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
+> **Note:** no data will be sent to our servers, the deployment only runs between your browser and your host!

@@ -41,12 +41,12 @@ bash -c "$(curl -s http://xoa.io/deploy)"
 Follow the instructions:
 
 * Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
-* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
+* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS server should be provided.
 * XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.
 
-### Via download the XVA
+### Via a manual XVA download
 
-Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
+You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
 
 After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.

@@ -64,6 +64,35 @@ Once you have started the VM, you can access the web UI by putting the IP you configured in your web browser.
 
 **The first thing** you need to do with your XOA is register. [Read the documentation on the page dedicated to the updater/register inferface](updater.md).
 
+## Technical Support
+
+In your appliance, you can access the support section in the XOA menu. In this section you can:
+
+* launch an `xoa check` command
+
+* Open a secure support tunnel so our team can remotely investigate
+
+<a id="ssh-pro-support"></a>
+
+If your web UI is not working, you can also open the secure support tunnel from the CLI. To open a private tunnel (we are the only one with the private key), you can use the command `xoa support tunnel` like below:
+
+```
+$ xoa support tunnel
+The support tunnel has been created.
+
+Do not stop this command before the intervention is over!
+Give this id to the support: 40713
+```
+
+Give us this number, and we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
+
+> The tunnel utilizes the user `xoa-support`. If you want to deactivate this bundled user, you can run `chage -E 0 xoa-support`. To re-activate this account, you must run `chage -E 1 xoa-support`.
+
 ### First console connection
 
 If you connect via SSH or console, the default credentials are:

@@ -156,21 +185,6 @@ You can access the VM console through XenCenter or using VNC through a SSH tunnel
 
 If you want to go back in DHCP, just run `xoa network dhcp`
 
-### SSH Pro Support
-
-By default, if you need support, there is a dedicated user named `xoa-support`. We are the only one with the private key. If you want our assistance on your XOA, you can open a private tunnel with the command `xoa support tunnel` like below:
-
-```
-$ xoa support tunnel
-The support tunnel has been created.
-
-Do not stop this command before the intervention is over!
-Give this id to the support: 40713
-```
-
-Give us this number, we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
-
-> If you want to deactivate this bundled user, you can type `chage -E 0 xoa-support`. To re-activate this account, you must use the `chage -E 1 xoa-support`.
-
 ### Firewall
@@ -12,18 +12,18 @@
     "eslint-config-standard-jsx": "^8.1.0",
     "eslint-plugin-eslint-comments": "^3.1.1",
     "eslint-plugin-import": "^2.8.0",
-    "eslint-plugin-node": "^9.0.1",
+    "eslint-plugin-node": "^10.0.0",
     "eslint-plugin-promise": "^4.0.0",
     "eslint-plugin-react": "^7.6.1",
     "eslint-plugin-standard": "^4.0.0",
     "exec-promise": "^0.7.0",
-    "flow-bin": "^0.106.3",
+    "flow-bin": "^0.112.0",
     "globby": "^10.0.0",
     "husky": "^3.0.0",
     "jest": "^24.1.0",
     "lodash": "^4.17.4",
     "prettier": "^1.10.2",
-    "promise-toolbox": "^0.13.0",
+    "promise-toolbox": "^0.14.0",
     "sorted-object": "^2.0.1"
   },
   "engines": {

@@ -60,6 +60,7 @@
     "posttest": "scripts/run-script test",
     "prepare": "scripts/run-script prepare",
     "pretest": "eslint --ignore-path .gitignore .",
+    "prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
     "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
     "test-integration": "jest \".integ\\.spec\\.js$\"",
     "travis-tests": "scripts/travis-tests"
@@ -35,7 +35,7 @@
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.1",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {

@@ -33,7 +33,7 @@
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "@babel/preset-flow": "^7.0.0",
-    "cross-env": "^5.1.3",
+    "cross-env": "^6.0.3",
     "rimraf": "^3.0.0"
   },
   "scripts": {
@@ -24,25 +24,25 @@
     "dist/"
   ],
   "engines": {
-    "node": ">=6"
+    "node": ">=8.10"
   },
   "dependencies": {
-    "@xen-orchestra/fs": "^0.10.1",
+    "@xen-orchestra/fs": "^0.10.2",
     "cli-progress": "^3.1.0",
     "exec-promise": "^0.7.0",
     "getopts": "^2.2.3",
     "struct-fu": "^1.2.0",
-    "vhd-lib": "^0.7.0"
+    "vhd-lib": "^0.7.2"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
-    "execa": "^2.0.2",
+    "cross-env": "^6.0.3",
+    "execa": "^3.2.0",
     "index-modules": "^0.3.0",
-    "promise-toolbox": "^0.13.0",
+    "promise-toolbox": "^0.14.0",
     "rimraf": "^3.0.0",
     "tmp": "^0.1.0"
   },
@@ -1,6 +1,6 @@
 {
   "name": "vhd-lib",
-  "version": "0.7.0",
+  "version": "0.7.2",
   "license": "AGPL-3.0",
   "description": "Primitives for VHD file handling",
   "keywords": [],

@@ -18,15 +18,17 @@
     "dist/"
   ],
   "engines": {
-    "node": ">=6"
+    "node": ">=8.10"
   },
   "dependencies": {
+    "@xen-orchestra/log": "^0.2.0",
     "async-iterator-to-stream": "^1.0.2",
+    "core-js": "^3.0.0",
     "from2": "^2.3.0",
     "fs-extra": "^8.0.1",
     "limit-concurrency-decorator": "^0.4.0",
-    "promise-toolbox": "^0.13.0",
+    "promise-toolbox": "^0.14.0",
     "lodash": "^4.17.4",
     "struct-fu": "^1.2.0",
     "uuid": "^3.0.1"
   },

@@ -35,10 +37,10 @@
     "@babel/core": "^7.0.0",
     "@babel/preset-env": "^7.0.0",
     "@babel/preset-flow": "^7.0.0",
-    "@xen-orchestra/fs": "^0.10.1",
+    "@xen-orchestra/fs": "^0.10.2",
     "babel-plugin-lodash": "^3.3.2",
-    "cross-env": "^5.1.3",
-    "execa": "^2.0.2",
+    "cross-env": "^6.0.3",
+    "execa": "^3.2.0",
     "fs-promise": "^2.0.0",
     "get-stream": "^5.1.0",
     "index-modules": "^0.3.0",
@@ -17,10 +17,7 @@ export default async function readChunk(stream, n) {
       resolve(Buffer.concat(chunks, i))
     }
 
-    function onEnd() {
-      resolve2()
-      clean()
-    }
+    const onEnd = resolve2
 
     function onError(error) {
       reject(error)

@@ -34,8 +31,11 @@ export default async function readChunk(stream, n) {
       }
       i += chunk.length
      chunks.push(chunk)
-      if (i >= n) {
+
+      if (i === n) {
         resolve2()
+      } else if (i > n) {
+        throw new RangeError(`read (${i}) more than expected (${n})`)
       }
     }
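With this change, `readChunk(stream, n)` still resolves with a Buffer once `n` bytes have arrived, but an over-long read now fails loudly instead of being silently accepted. A hedged usage sketch (module path, file name and chunk size are illustrative, not from the diff):

```js
import { createReadStream } from 'fs'
import readChunk from './_readChunk' // assumed path of this internal helper

async function readFirstSector(path) {
  const stream = createReadStream(path)
  // resolves with a Buffer of up to 512 bytes; after this change, receiving
  // more than requested raises a RangeError instead of resolving normally
  return readChunk(stream, 512)
}
```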
@@ -29,13 +29,13 @@ export default asyncIteratorToStream(async function*(size, blockParser) {
 
   let next
   while ((next = await blockParser.next()) !== null) {
-    const paddingLength = next.offsetBytes - position
+    const paddingLength = next.logicalAddressBytes - position
     if (paddingLength < 0) {
       throw new Error('Received out of order blocks')
     }
     yield* filePadding(paddingLength)
     yield next.data
-    position = next.offsetBytes + next.data.length
+    position = next.logicalAddressBytes + next.data.length
   }
   yield* filePadding(actualSize - position)
   yield footer
@@ -1,5 +1,6 @@
 import assert from 'assert'
 import asyncIteratorToStream from 'async-iterator-to-stream'
+import { forEachRight } from 'lodash'
 
 import computeGeometryForSize from './_computeGeometryForSize'
 import { createFooter, createHeader } from './_createFooterHeader'

@@ -17,38 +18,65 @@ import { set as setBitmap } from './_bitmap'
 const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
 
+/**
+ * Looks once backwards to collect the last fragment of each VHD block (they could be interleaved),
+ * then allocates the blocks in a forwards pass.
+ * @returns currentVhdPositionSector the first free sector after the data
+ */
 function createBAT(
   firstBlockPosition,
-  blockAddressList,
+  fragmentLogicAddressList,
   ratio,
   bat,
   bitmapSize
 ) {
   let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
-  blockAddressList.forEach(blockPosition => {
-    assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
-    const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
-    if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
-      bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
-      currentVhdPositionSector +=
-        (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
+  const lastFragmentPerBlock = new Map()
+  forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
+    assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
+    const vhdTableIndex = Math.floor(
+      fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
+    )
+    if (!lastFragmentPerBlock.has(vhdTableIndex)) {
+      lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
     }
   })
-  return currentVhdPositionSector
+  const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
+  // lastFragmentPerBlock is from last to first, so we go the other way around
+  forEachRight(
+    lastFragmentPerBlockArray,
+    ([vhdTableIndex, _fragmentVirtualAddress]) => {
+      if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
+        bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
+        currentVhdPositionSector +=
+          (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
+      }
+    }
+  )
+  return [currentVhdPositionSector, lastFragmentPerBlock]
 }
 
+/**
+ * Receives an iterator of constant sized fragments, and a list of their address in virtual space, and returns
+ * a stream representing the VHD file of this disk.
+ * The fragment size should be an integer divider of the VHD block size.
+ * "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
+ * @param diskSize
+ * @param fragmentSize
+ * @param fragmentLogicalAddressList
+ * @param fragmentIterator
+ * @returns {Promise<Function>}
+ */
 
 export default async function createReadableStream(
   diskSize,
-  incomingBlockSize,
-  blockAddressList,
-  blockIterator
+  fragmentSize,
+  fragmentLogicalAddressList,
+  fragmentIterator
 ) {
-  const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
+  const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
   if (ratio % 1 !== 0) {
     throw new Error(
-      `Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
+      `Can't import file, grain size (${fragmentSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
    )
  }
||||
if (ratio > 53) {
|
||||
@@ -80,60 +108,72 @@ export default async function createReadableStream(
|
||||
const bitmapSize =
|
||||
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
|
||||
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
|
||||
const endOfData = createBAT(
|
||||
const [endOfData, lastFragmentPerBlock] = createBAT(
|
||||
firstBlockPosition,
|
||||
blockAddressList,
|
||||
fragmentLogicalAddressList,
|
||||
ratio,
|
||||
bat,
|
||||
bitmapSize
|
||||
)
|
||||
const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
|
||||
let position = 0
|
||||
function* yieldAndTrack(buffer, expectedPosition) {
|
||||
|
||||
function* yieldAndTrack(buffer, expectedPosition, reason) {
|
||||
if (expectedPosition !== undefined) {
|
||||
assert.strictEqual(position, expectedPosition)
|
||||
assert.strictEqual(position, expectedPosition, reason)
|
||||
}
|
||||
if (buffer.length > 0) {
|
||||
yield buffer
|
||||
position += buffer.length
|
||||
}
|
||||
}
|
||||
async function* generateFileContent(blockIterator, bitmapSize, ratio) {
|
||||
let currentBlock = -1
|
||||
let currentVhdBlockIndex = -1
|
||||
let currentBlockWithBitmap = Buffer.alloc(0)
|
||||
for await (const next of blockIterator) {
|
||||
currentBlock++
|
||||
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
|
||||
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
|
||||
if (batIndex !== currentVhdBlockIndex) {
|
||||
if (currentVhdBlockIndex >= 0) {
|
||||
yield* yieldAndTrack(
|
||||
currentBlockWithBitmap,
|
||||
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
|
||||
)
|
||||
}
|
||||
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
|
||||
currentVhdBlockIndex = batIndex
|
||||
}
|
||||
const blockOffset =
|
||||
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
|
||||
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
|
||||
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
|
||||
}
|
||||
next.data.copy(
|
||||
currentBlockWithBitmap,
|
||||
bitmapSize + (next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
|
||||
)
|
||||
|
||||
function insertFragmentInBlock(fragment, blockWithBitmap) {
|
||||
const fragmentOffsetInBlock =
|
||||
(fragment.logicalAddressBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
|
||||
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
|
||||
setBitmap(blockWithBitmap, fragmentOffsetInBlock + bitPos)
|
||||
}
|
||||
fragment.data.copy(
|
||||
blockWithBitmap,
|
||||
bitmapSize + (fragment.logicalAddressBytes % VHD_BLOCK_SIZE_BYTES)
|
||||
)
|
||||
}
|
||||
|
||||
async function* generateBlocks(fragmentIterator, bitmapSize) {
|
||||
let currentFragmentIndex = -1
|
||||
// store blocks waiting for some of their fragments.
|
||||
const batIndexToBlockMap = new Map()
|
||||
for await (const fragment of fragmentIterator) {
|
||||
currentFragmentIndex++
|
||||
const batIndex = Math.floor(
|
||||
fragment.logicalAddressBytes / VHD_BLOCK_SIZE_BYTES
|
||||
)
|
||||
let currentBlockWithBitmap = batIndexToBlockMap.get(batIndex)
|
||||
if (currentBlockWithBitmap === undefined) {
|
||||
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
|
||||
batIndexToBlockMap.set(batIndex, currentBlockWithBitmap)
|
||||
}
|
||||
insertFragmentInBlock(fragment, currentBlockWithBitmap)
|
||||
const batEntry = bat.readUInt32BE(batIndex * 4)
|
||||
assert.notStrictEqual(batEntry, BLOCK_UNUSED)
|
||||
const batPosition = batEntry * SECTOR_SIZE
|
||||
if (lastFragmentPerBlock.get(batIndex) === fragment.logicalAddressBytes) {
|
||||
batIndexToBlockMap.delete(batIndex)
|
||||
yield* yieldAndTrack(
|
||||
currentBlockWithBitmap,
|
||||
batPosition,
|
||||
`VHD block start index: ${currentFragmentIndex}`
|
||||
)
|
||||
}
|
||||
}
|
||||
yield* yieldAndTrack(currentBlockWithBitmap)
|
||||
}
|
||||
|
||||
async function* iterator() {
|
||||
yield* yieldAndTrack(footer, 0)
|
||||
yield* yieldAndTrack(header, FOOTER_SIZE)
|
||||
yield* yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
|
||||
yield* generateFileContent(blockIterator, bitmapSize, ratio)
|
||||
yield* generateBlocks(fragmentIterator, bitmapSize)
|
||||
yield* yieldAndTrack(footer)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import resolveRelativeFromFile from './_resolveRelativeFromFile'
|
||||
|
||||
@@ -13,18 +14,23 @@ import {
|
||||
import { fuFooter, fuHeader, checksumStruct } from './_structs'
|
||||
import { test as mapTestBit } from './_bitmap'
|
||||
|
||||
export default async function createSyntheticStream(handler, path) {
|
||||
const { warn } = createLogger('vhd-lib:createSyntheticStream')
|
||||
|
||||
export default async function createSyntheticStream(handler, paths) {
|
||||
const fds = []
|
||||
const cleanup = () => {
|
||||
for (let i = 0, n = fds.length; i < n; ++i) {
|
||||
handler.closeFile(fds[i]).catch(error => {
|
||||
console.warn('createReadStream, closeFd', i, error)
|
||||
warn('error while closing file', {
|
||||
error,
|
||||
fd: fds[i],
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
try {
|
||||
const vhds = []
|
||||
while (true) {
|
||||
const open = async path => {
|
||||
const fd = await handler.openFile(path, 'r')
|
||||
fds.push(fd)
|
||||
const vhd = new Vhd(handler, fd)
|
||||
@@ -32,11 +38,18 @@ export default async function createSyntheticStream(handler, path) {
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockAllocationTable()
|
||||
|
||||
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
|
||||
break
|
||||
return vhd
|
||||
}
|
||||
if (typeof paths === 'string') {
|
||||
let path = paths
|
||||
let vhd
|
||||
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
} else {
|
||||
for (const path of paths) {
|
||||
await open(path)
|
||||
}
|
||||
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
const nVhds = vhds.length
|
||||
|
||||
|
||||
@@ -6,11 +6,8 @@ export { default as chainVhd } from './chain'
|
||||
export { default as checkVhdChain } from './checkChain'
|
||||
export { default as createContentStream } from './createContentStream'
|
||||
export { default as createReadableRawStream } from './createReadableRawStream'
|
||||
export {
|
||||
default as createReadableSparseStream,
|
||||
} from './createReadableSparseStream'
|
||||
export { default as createReadableSparseStream } from './createReadableSparseStream'
|
||||
export { default as createSyntheticStream } from './createSyntheticStream'
|
||||
export { default as mergeVhd } from './merge'
|
||||
export {
|
||||
default as createVhdStreamWithLength,
|
||||
} from './createVhdStreamWithLength'
|
||||
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
|
||||
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
|
||||
|
||||
10
packages/vhd-lib/src/peekFooterFromVhdStream.js
Normal file
10
packages/vhd-lib/src/peekFooterFromVhdStream.js
Normal file
@@ -0,0 +1,10 @@
|
||||
import readChunk from './_readChunk'
|
||||
import { FOOTER_SIZE } from './_constants'
|
||||
import { fuFooter } from './_structs'
|
||||
|
||||
export default async function peekFooterFromStream(stream) {
|
||||
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
|
||||
const footer = fuFooter.unpack(footerBuffer)
|
||||
stream.unshift(footerBuffer)
|
||||
return footer
|
||||
}
|
||||
@@ -1,4 +1,5 @@
|
||||
import assert from 'assert'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
@@ -15,10 +16,7 @@ import {
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
const { debug } = createLogger('vhd-lib:Vhd')
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
@@ -40,9 +38,11 @@ const sectorsToBytes = sectors => sectors * SECTOR_SIZE
|
||||
const assertChecksum = (name, buf, struct) => {
|
||||
const actual = unpackField(struct.fields.checksum, buf)
|
||||
const expected = checksumStruct(buf, struct)
|
||||
if (actual !== expected) {
|
||||
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
|
||||
}
|
||||
assert.strictEqual(
|
||||
actual,
|
||||
expected,
|
||||
`invalid ${name} checksum ${actual}, expected ${expected}`
|
||||
)
|
||||
}
|
||||
|
||||
// unused block as buffer containing a uint32BE
|
||||
@@ -102,7 +102,7 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Returns the first address after metadata. (In bytes)
|
||||
getEndOfHeaders() {
|
||||
_getEndOfHeaders() {
|
||||
const { header } = this
|
||||
|
||||
let end = FOOTER_SIZE + HEADER_SIZE
|
||||
@@ -127,8 +127,8 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Returns the first sector after data.
|
||||
getEndOfData() {
|
||||
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
|
||||
_getEndOfData() {
|
||||
let end = Math.ceil(this._getEndOfHeaders() / SECTOR_SIZE)
|
||||
|
||||
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
|
||||
const { maxTableEntries } = this.header
|
||||
@@ -309,8 +309,8 @@ export default class Vhd {
|
||||
|
||||
// Make a new empty block at vhd end.
|
||||
// Update block allocation table in context and in file.
|
||||
async createBlock(blockId) {
|
||||
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
|
||||
async _createBlock(blockId) {
|
||||
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
|
||||
|
||||
debug(`create block ${blockId} at ${blockAddr}`)
|
||||
|
||||
@@ -325,7 +325,7 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Write a bitmap at a block address.
|
||||
async writeBlockBitmap(blockAddr, bitmap) {
|
||||
async _writeBlockBitmap(blockAddr, bitmap) {
|
||||
const { bitmapSize } = this
|
||||
|
||||
if (bitmap.length !== bitmapSize) {
|
||||
@@ -342,20 +342,20 @@ export default class Vhd {
|
||||
await this._write(bitmap, sectorsToBytes(blockAddr))
|
||||
}
|
||||
|
||||
async writeEntireBlock(block) {
|
||||
async _writeEntireBlock(block) {
|
||||
let blockAddr = this._getBatEntry(block.id)
|
||||
|
||||
if (blockAddr === BLOCK_UNUSED) {
|
||||
blockAddr = await this.createBlock(block.id)
|
||||
blockAddr = await this._createBlock(block.id)
|
||||
}
|
||||
await this._write(block.buffer, sectorsToBytes(blockAddr))
|
||||
}
|
||||
|
||||
async writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
|
||||
async _writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
|
||||
let blockAddr = this._getBatEntry(block.id)
|
||||
|
||||
if (blockAddr === BLOCK_UNUSED) {
|
||||
blockAddr = await this.createBlock(block.id)
|
||||
blockAddr = await this._createBlock(block.id)
|
||||
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
|
||||
} else if (parentBitmap === undefined) {
|
||||
parentBitmap = (await this._readBlock(block.id, true)).bitmap
|
||||
@@ -364,14 +364,14 @@ export default class Vhd {
|
||||
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
|
||||
|
||||
debug(
|
||||
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
|
||||
`_writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
|
||||
)
|
||||
|
||||
for (let i = beginSectorId; i < endSectorId; ++i) {
|
||||
mapSetBit(parentBitmap, i)
|
||||
}
|
||||
|
||||
await this.writeBlockBitmap(blockAddr, parentBitmap)
|
||||
await this._writeBlockBitmap(blockAddr, parentBitmap)
|
||||
await this._write(
|
||||
block.data.slice(
|
||||
sectorsToBytes(beginSectorId),
|
||||
@@ -407,12 +407,12 @@ export default class Vhd {
|
||||
|
||||
const isFullBlock = i === 0 && endSector === sectorsPerBlock
|
||||
if (isFullBlock) {
|
||||
await this.writeEntireBlock(block)
|
||||
await this._writeEntireBlock(block)
|
||||
} else {
|
||||
if (parentBitmap === null) {
|
||||
parentBitmap = (await this._readBlock(blockId, true)).bitmap
|
||||
}
|
||||
await this.writeBlockSectors(block, i, endSector, parentBitmap)
|
||||
await this._writeBlockSectors(block, i, endSector, parentBitmap)
|
||||
}
|
||||
|
||||
i = endSector
|
||||
@@ -429,7 +429,7 @@ export default class Vhd {
|
||||
const rawFooter = fuFooter.pack(footer)
|
||||
const eof = await this._handler.getSize(this._path)
|
||||
// sometimes the file is longer than anticipated, we still need to put the footer at the end
|
||||
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
|
||||
const offset = Math.max(this._getEndOfData(), eof - rawFooter.length)
|
||||
|
||||
footer.checksum = checksumStruct(rawFooter, fuFooter)
|
||||
debug(
|
||||
@@ -500,7 +500,7 @@ export default class Vhd {
|
||||
endInBuffer
|
||||
)
|
||||
}
|
||||
await this.writeBlockSectors(
|
||||
await this._writeBlockSectors(
|
||||
{ id: currentBlock, data: inputBuffer },
|
||||
offsetInBlockSectors,
|
||||
endInBlockSectors
|
||||
@@ -509,7 +509,7 @@ export default class Vhd {
|
||||
await this.writeFooter()
|
||||
}
|
||||
|
||||
async ensureSpaceForParentLocators(neededSectors) {
|
||||
async _ensureSpaceForParentLocators(neededSectors) {
|
||||
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
|
||||
const currentSpace =
|
||||
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
|
||||
@@ -528,7 +528,7 @@ export default class Vhd {
|
||||
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
|
||||
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
|
||||
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
|
||||
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
const position = await this._ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
await this._write(encodedFilename, position)
|
||||
header.parentLocatorEntry[0].platformDataSpace =
|
||||
dataSpaceSectors * SECTOR_SIZE
|
||||
|
||||
@@ -31,11 +31,11 @@ test('createFooter() does not crash', () => {
|
||||
test('ReadableRawVHDStream does not crash', async () => {
|
||||
const data = [
|
||||
{
|
||||
offsetBytes: 100,
|
||||
logicalAddressBytes: 100,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: 700,
|
||||
logicalAddressBytes: 700,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -62,11 +62,11 @@ test('ReadableRawVHDStream does not crash', async () => {
|
||||
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
|
||||
const data = [
|
||||
{
|
||||
offsetBytes: 700,
|
||||
logicalAddressBytes: 700,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: 100,
|
||||
logicalAddressBytes: 100,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -97,11 +97,11 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const blockSize = Math.pow(2, 16)
|
||||
const blocks = [
|
||||
{
|
||||
offsetBytes: blockSize * 3,
|
||||
logicalAddressBytes: blockSize * 3,
|
||||
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
offsetBytes: blockSize * 100,
|
||||
logicalAddressBytes: blockSize * 100,
|
||||
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -109,7 +109,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const stream = await createReadableSparseStream(
|
||||
fileSize,
|
||||
blockSize,
|
||||
blocks.map(b => b.offsetBytes),
|
||||
blocks.map(b => b.logicalAddressBytes),
|
||||
blocks
|
||||
)
|
||||
expect(stream.length).toEqual(4197888)
|
||||
@@ -128,7 +128,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const out1 = await readFile(`${tempDir}/out1.raw`)
|
||||
const expected = Buffer.alloc(fileSize)
|
||||
blocks.forEach(b => {
|
||||
b.data.copy(expected, b.offsetBytes)
|
||||
b.data.copy(expected, b.logicalAddressBytes)
|
||||
})
|
||||
await expect(out1.slice(0, expected.length)).toEqual(expected)
|
||||
})
|
||||
|
||||
@@ -36,19 +36,19 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"archy": "^1.0.0",
|
||||
"chalk": "^2.3.2",
|
||||
"chalk": "^3.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.27.2"
|
||||
"xen-api": "^0.27.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.5",
|
||||
"@babel/preset-env": "^7.1.5",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"cross-env": "^5.1.4",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -4,6 +4,7 @@ process.env.DEBUG = '*'
|
||||
|
||||
const defer = require('golike-defer').default
|
||||
const { CancelToken } = require('promise-toolbox')
|
||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
|
||||
const { createClient } = require('../')
|
||||
|
||||
@@ -32,8 +33,13 @@ defer(async ($defer, args) => {
|
||||
const { cancel, token } = CancelToken.source()
|
||||
process.on('SIGINT', cancel)
|
||||
|
||||
let input = createInputStream(args[2])
|
||||
if (!raw && input.length === undefined) {
|
||||
input = await createVhdStreamWithLength(input)
|
||||
}
|
||||
|
||||
// https://xapi-project.github.io/xen-api/snapshots.html#uploading-a-disk-or-snapshot
|
||||
await xapi.putResource(token, createInputStream(args[2]), '/import_raw_vdi/', {
|
||||
await xapi.putResource(token, input, '/import_raw_vdi/', {
|
||||
query: {
|
||||
format: raw ? 'raw' : 'vhd',
|
||||
vdi: await resolveRef(xapi, 'VDI', args[1])
|
||||
|
||||
121
packages/xen-api/examples/package-lock.json
generated
121
packages/xen-api/examples/package-lock.json
generated
@@ -2,6 +2,28 @@
|
||||
"requires": true,
|
||||
"lockfileVersion": 1,
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": {
|
||||
"version": "0.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@xen-orchestra/log/-/log-0.2.0.tgz",
|
||||
"integrity": "sha512-xNseJ/TIUdASm9uxr0zVvg8qDG+Xw6ycJy4dag+e1yl6pEr77GdPJD2R0JbE1BbZwup/Skh3TEh6L0GV+9NRdQ==",
|
||||
"requires": {
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
}
|
||||
},
|
||||
"async-iterator-to-stream": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/async-iterator-to-stream/-/async-iterator-to-stream-1.1.0.tgz",
|
||||
"integrity": "sha512-ddF3u7ipixenFJsYCKqVR9tNdkIzd2j7JVg8QarqkfUl7UTR7nhJgc1Q+3ebP/5DNFhV9Co9F47FJjGpdc0PjQ==",
|
||||
"requires": {
|
||||
"readable-stream": "^3.0.5"
|
||||
}
|
||||
},
|
||||
"core-js": {
|
||||
"version": "3.4.1",
|
||||
"resolved": "https://registry.npmjs.org/core-js/-/core-js-3.4.1.tgz",
|
||||
"integrity": "sha512-KX/dnuY/J8FtEwbnrzmAjUYgLqtk+cxM86hfG60LGiW3MmltIc2yAmDgBgEkfm0blZhUrdr1Zd84J2Y14mLxzg=="
|
||||
},
|
||||
"core-util-is": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
|
||||
@@ -24,6 +46,41 @@
|
||||
"node-gyp-build": "^3.7.0"
|
||||
}
|
||||
},
|
||||
"from2": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
|
||||
"integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
|
||||
"requires": {
|
||||
"inherits": "^2.0.1",
|
||||
"readable-stream": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"readable-stream": {
|
||||
"version": "2.3.6",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
|
||||
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
|
||||
"requires": {
|
||||
"core-util-is": "~1.0.0",
|
||||
"inherits": "~2.0.3",
|
||||
"isarray": "~1.0.0",
|
||||
"process-nextick-args": "~2.0.0",
|
||||
"safe-buffer": "~5.1.1",
|
||||
"string_decoder": "~1.1.1",
|
||||
"util-deprecate": "~1.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"fs-extra": {
|
||||
"version": "8.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
|
||||
"integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
|
||||
"requires": {
|
||||
"graceful-fs": "^4.2.0",
|
||||
"jsonfile": "^4.0.0",
|
||||
"universalify": "^0.1.0"
|
||||
}
|
||||
},
|
||||
"getopts": {
|
||||
"version": "2.2.5",
|
||||
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
|
||||
@@ -34,6 +91,11 @@
|
||||
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
|
||||
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
|
||||
},
|
||||
"graceful-fs": {
|
||||
"version": "4.2.3",
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
|
||||
"integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
|
||||
},
|
||||
"human-format": {
|
||||
"version": "0.10.1",
|
||||
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
|
||||
@@ -49,6 +111,24 @@
|
||||
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
|
||||
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
|
||||
},
|
||||
"jsonfile": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
|
||||
"integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
|
||||
"requires": {
|
||||
"graceful-fs": "^4.1.6"
|
||||
}
|
||||
},
|
||||
"limit-concurrency-decorator": {
|
||||
"version": "0.4.0",
|
||||
"resolved": "https://registry.npmjs.org/limit-concurrency-decorator/-/limit-concurrency-decorator-0.4.0.tgz",
|
||||
"integrity": "sha512-hXGTuCkYjosfHT1D7dcPKzPHSGwBtZfN0wummzDwxi5A3ZUNBB75qM8phKEjQGlQGAfYrMW/JqhbaljO3xOH0A=="
|
||||
},
|
||||
"lodash": {
|
||||
"version": "4.17.15",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
|
||||
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
|
||||
},
|
||||
"make-error": {
|
||||
"version": "1.3.5",
|
||||
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
|
||||
@@ -141,6 +221,11 @@
|
||||
"safe-buffer": "~5.1.0"
|
||||
}
|
||||
},
|
||||
"struct-fu": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/struct-fu/-/struct-fu-1.2.1.tgz",
|
||||
"integrity": "sha512-QrtfoBRe+RixlBJl852/Gu7tLLTdx3kWs3MFzY1OHNrSsYYK7aIAnzqsncYRWrKGG/QSItDmOTlELMxehw4Gjw=="
|
||||
},
|
||||
"throttle": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
|
||||
@@ -175,11 +260,47 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"universalify": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
|
||||
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
|
||||
},
|
||||
"util-deprecate": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
|
||||
},
|
||||
"uuid": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz",
|
||||
"integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ=="
|
||||
},
|
||||
"vhd-lib": {
|
||||
"version": "0.7.1",
|
||||
"resolved": "https://registry.npmjs.org/vhd-lib/-/vhd-lib-0.7.1.tgz",
|
||||
"integrity": "sha512-TODzo7KjtNzYF/NuJjE5bPeGyXZIUzAOVJvED1dcPXr8iSnS6/U5aNdtKahBVwukEzf0/x+Cu3GMYutV4/cxsQ==",
|
||||
"requires": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"promise-toolbox": {
|
||||
"version": "0.14.0",
|
||||
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.14.0.tgz",
|
||||
"integrity": "sha512-VV5lXK4lXaPB9oBO50ope1qd0AKN8N3nK14jYvV9/qFmfZW2Px/bJjPZBniGjXcIJf6J5Y/coNgJtPHDyiUV/g==",
|
||||
"requires": {
|
||||
"make-error": "^1.3.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"xtend": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"readable-stream": "^3.1.1",
|
||||
"throttle": "^1.0.3"
|
||||
"throttle": "^1.0.3",
|
||||
"vhd-lib": "^0.7.2"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.27.2",
|
||||
"version": "0.27.3",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -46,7 +46,7 @@
|
||||
"make-error": "^1.3.0",
|
||||
"minimist": "^1.2.0",
|
||||
"ms": "^2.1.1",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"pw": "0.0.4",
|
||||
"xmlrpc": "^1.3.2",
|
||||
"xo-collection": "^0.4.1"
|
||||
@@ -60,7 +60,7 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
|
||||
import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { filter, find } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
@@ -110,7 +110,7 @@ const main = async args => {
|
||||
asCallback.call(
|
||||
fromCallback(cb => {
|
||||
evaluate.call(repl, cmd, context, filename, cb)
|
||||
}).then(value => (isArray(value) ? Promise.all(value) : value)),
|
||||
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
|
||||
cb
|
||||
)
|
||||
})(repl.eval)
|
||||
|
||||
@@ -4,7 +4,7 @@ import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import httpRequest from 'http-request-plus'
|
||||
import { EventEmitter } from 'events'
|
||||
import { isArray, map, noop, omit } from 'lodash'
|
||||
import { map, noop, omit } from 'lodash'
|
||||
import {
|
||||
cancelable,
|
||||
defer,
|
||||
@@ -25,7 +25,6 @@ import isReadOnlyCall from './_isReadOnlyCall'
|
||||
import makeCallSetting from './_makeCallSetting'
|
||||
import parseUrl from './_parseUrl'
|
||||
import replaceSensitiveValues from './_replaceSensitiveValues'
|
||||
import XapiError from './_XapiError'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
@@ -113,7 +112,7 @@ export class Xapi extends EventEmitter {
|
||||
this._watchedTypes = undefined
|
||||
const { watchEvents } = opts
|
||||
if (watchEvents !== false) {
|
||||
if (isArray(watchEvents)) {
|
||||
if (Array.isArray(watchEvents)) {
|
||||
this._watchedTypes = watchEvents
|
||||
}
|
||||
this.watchEvents()
|
||||
@@ -626,9 +625,7 @@ export class Xapi extends EventEmitter {
|
||||
kindOf(result)
|
||||
)
|
||||
return result
|
||||
} catch (e) {
|
||||
const error = e instanceof Error ? e : XapiError.wrap(e)
|
||||
|
||||
} catch (error) {
|
||||
// do not log the session ID
|
||||
//
|
||||
// TODO: should log at the session level to avoid logging sensitive
|
||||
@@ -743,9 +740,9 @@ export class Xapi extends EventEmitter {
|
||||
// the event loop in that case
|
||||
if (this._pool.$ref !== oldPoolRef) {
|
||||
// Uses introspection to list available types.
|
||||
const types = (this._types = (await this._interruptOnDisconnect(
|
||||
this._call('system.listMethods')
|
||||
))
|
||||
const types = (this._types = (
|
||||
await this._interruptOnDisconnect(this._call('system.listMethods'))
|
||||
)
|
||||
.filter(isGetAllRecordsMethod)
|
||||
.map(method => method.slice(0, method.indexOf('.'))))
|
||||
this._lcToTypes = { __proto__: null }
|
||||
@@ -1075,7 +1072,7 @@ export class Xapi extends EventEmitter {
|
||||
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
|
||||
|
||||
const value = data[field]
|
||||
if (isArray(value)) {
|
||||
if (Array.isArray(value)) {
|
||||
if (value.length === 0 || isOpaqueRef(value[0])) {
|
||||
getters[$field] = function() {
|
||||
const value = this[field]
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import httpRequestPlus from 'http-request-plus'
|
||||
import { format, parse } from 'json-rpc-protocol'
|
||||
|
||||
import XapiError from '../_XapiError'
|
||||
|
||||
import UnsupportedTransport from './_UnsupportedTransport'
|
||||
|
||||
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
|
||||
@@ -30,7 +32,7 @@ export default ({ allowUnauthorized, url }) => {
|
||||
return response.result
|
||||
}
|
||||
|
||||
throw response.error
|
||||
throw XapiError.wrap(response.error)
|
||||
},
|
||||
error => {
|
||||
if (error.response !== undefined) {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { createClient, createSecureClient } from 'xmlrpc'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
|
||||
import XapiError from '../_XapiError'
|
||||
|
||||
import prepareXmlRpcParams from './_prepareXmlRpcParams'
|
||||
import UnsupportedTransport from './_UnsupportedTransport'
|
||||
|
||||
@@ -33,7 +35,7 @@ const parseResult = result => {
|
||||
}
|
||||
|
||||
if (status !== 'Success') {
|
||||
throw result.ErrorDescription
|
||||
throw XapiError.wrap(result.ErrorDescription)
|
||||
}
|
||||
|
||||
const value = result.Value
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
import { createClient, createSecureClient } from 'xmlrpc'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
|
||||
import XapiError from '../_XapiError'
|
||||
|
||||
import prepareXmlRpcParams from './_prepareXmlRpcParams'
|
||||
|
||||
const logError = error => {
|
||||
@@ -26,7 +28,7 @@ const parseResult = result => {
|
||||
}
|
||||
|
||||
if (status !== 'Success') {
|
||||
throw result.ErrorDescription
|
||||
throw XapiError.wrap(result.ErrorDescription)
|
||||
}
|
||||
|
||||
return result.Value
|
||||
|
||||
@@ -26,28 +26,28 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/polyfill": "^7.0.0",
|
||||
"bluebird": "^3.5.1",
|
||||
"chalk": "^2.2.0",
|
||||
"chalk": "^3.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"fs-promise": "^2.0.3",
|
||||
"http-request-plus": "^0.8.0",
|
||||
"human-format": "^0.10.0",
|
||||
"l33teral": "^3.0.3",
|
||||
"lodash": "^4.17.4",
|
||||
"micromatch": "^3.1.3",
|
||||
"micromatch": "^4.0.2",
|
||||
"mkdirp": "^0.5.1",
|
||||
"nice-pipe": "0.0.0",
|
||||
"pretty-ms": "^4.0.0",
|
||||
"pretty-ms": "^5.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"pump": "^3.0.0",
|
||||
"pw": "^0.0.4",
|
||||
"strip-indent": "^2.0.0",
|
||||
"xdg-basedir": "^3.0.0",
|
||||
"strip-indent": "^3.0.0",
|
||||
"xdg-basedir": "^4.0.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -56,7 +56,7 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -7,7 +7,6 @@ const promisify = require('bluebird').promisify
|
||||
const readFile = promisify(require('fs').readFile)
|
||||
const writeFile = promisify(require('fs').writeFile)
|
||||
|
||||
const assign = require('lodash/assign')
|
||||
const l33t = require('l33teral')
|
||||
const mkdirp = promisify(require('mkdirp'))
|
||||
const xdgBasedir = require('xdg-basedir')
|
||||
@@ -41,7 +40,7 @@ const save = (exports.save = function(config) {
|
||||
|
||||
exports.set = function(data) {
|
||||
return load().then(function(config) {
|
||||
return save(assign(config, data))
|
||||
return save(Object.assign(config, data))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -17,7 +17,6 @@ const getKeys = require('lodash/keys')
|
||||
const hrp = require('http-request-plus').default
|
||||
const humanFormat = require('human-format')
|
||||
const identity = require('lodash/identity')
|
||||
const isArray = require('lodash/isArray')
|
||||
const isObject = require('lodash/isObject')
|
||||
const micromatch = require('micromatch')
|
||||
const nicePipe = require('nice-pipe')
|
||||
@@ -298,7 +297,11 @@ async function listCommands(args) {
|
||||
str.push(
|
||||
name,
|
||||
'=<',
|
||||
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
|
||||
type == null
|
||||
? 'unknown type'
|
||||
: Array.isArray(type)
|
||||
? type.join('|')
|
||||
: type,
|
||||
'>'
|
||||
)
|
||||
|
||||
@@ -383,7 +386,7 @@ async function call(args) {
|
||||
printProgress
|
||||
)
|
||||
|
||||
return fromCallback(cb => pump(response, progress, output, cb))
|
||||
return fromCallback(pump, response, progress, output)
|
||||
}
|
||||
|
||||
if (key === '$sendTo') {
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -260,7 +260,10 @@ describe('Collection', function() {
|
||||
forEach(
|
||||
{
|
||||
'add & update → add': [
|
||||
[['add', 'foo', 0], ['update', 'foo', 1]],
|
||||
[
|
||||
['add', 'foo', 0],
|
||||
['update', 'foo', 1],
|
||||
],
|
||||
{
|
||||
add: {
|
||||
foo: 1,
|
||||
@@ -268,10 +271,19 @@ describe('Collection', function() {
|
||||
},
|
||||
],
|
||||
|
||||
'add & remove → ∅': [[['add', 'foo', 0], ['remove', 'foo']], {}],
|
||||
'add & remove → ∅': [
|
||||
[
|
||||
['add', 'foo', 0],
|
||||
['remove', 'foo'],
|
||||
],
|
||||
{},
|
||||
],
|
||||
|
||||
'update & update → update': [
|
||||
[['update', 'bar', 1], ['update', 'bar', 2]],
|
||||
[
|
||||
['update', 'bar', 1],
|
||||
['update', 'bar', 2],
|
||||
],
|
||||
{
|
||||
update: {
|
||||
bar: 2,
|
||||
@@ -280,7 +292,10 @@ describe('Collection', function() {
|
||||
],
|
||||
|
||||
'update & remove → remove': [
|
||||
[['update', 'bar', 1], ['remove', 'bar']],
|
||||
[
|
||||
['update', 'bar', 1],
|
||||
['remove', 'bar'],
|
||||
],
|
||||
{
|
||||
remove: {
|
||||
bar: undefined,
|
||||
@@ -289,7 +304,10 @@ describe('Collection', function() {
|
||||
],
|
||||
|
||||
'remove & add → update': [
|
||||
[['remove', 'bar'], ['add', 'bar', 0]],
|
||||
[
|
||||
['remove', 'bar'],
|
||||
['add', 'bar', 0],
|
||||
],
|
||||
{
|
||||
update: {
|
||||
bar: 0,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { bind, iteratee } from 'lodash'
|
||||
import iteratee from 'lodash/iteratee'
|
||||
|
||||
import clearObject from './clear-object'
|
||||
import isEmpty from './is-empty'
|
||||
@@ -17,9 +17,9 @@ export default class Index {
|
||||
this._keysToHash = Object.create(null)
|
||||
|
||||
// Bound versions of listeners.
|
||||
this._onAdd = bind(this._onAdd, this)
|
||||
this._onUpdate = bind(this._onUpdate, this)
|
||||
this._onRemove = bind(this._onRemove, this)
|
||||
this._onAdd = this._onAdd.bind(this)
|
||||
this._onUpdate = this._onUpdate.bind(this)
|
||||
this._onRemove = this._onRemove.bind(this)
|
||||
}
|
||||
|
||||
// This method is used to compute the hash under which an item must
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { bind, iteratee } from 'lodash'
|
||||
import iteratee from 'lodash/iteratee'
|
||||
|
||||
import clearObject from './clear-object'
|
||||
import NotImplemented from './not-implemented'
|
||||
@@ -16,9 +16,9 @@ export default class UniqueIndex {
|
||||
this._keysToHash = Object.create(null)
|
||||
|
||||
// Bound versions of listeners.
|
||||
this._onAdd = bind(this._onAdd, this)
|
||||
this._onUpdate = bind(this._onUpdate, this)
|
||||
this._onRemove = bind(this._onRemove, this)
|
||||
this._onAdd = this._onAdd.bind(this)
|
||||
this._onUpdate = this._onUpdate.bind(this)
|
||||
this._onRemove = this._onRemove.bind(this)
|
||||
}
|
||||
|
||||
// This method is used to compute the hash under which an item must
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { bind, forEach, iteratee as createCallback } from 'lodash'
|
||||
import createCallback from 'lodash/iteratee'
|
||||
import forEach from 'lodash/forEach'
|
||||
|
||||
import Collection, {
|
||||
ACTION_ADD,
|
||||
@@ -19,9 +20,9 @@ export default class View extends Collection {
|
||||
this._onAdd(this._collection.all)
|
||||
|
||||
// Bound versions of listeners.
|
||||
this._onAdd = bind(this._onAdd, this)
|
||||
this._onUpdate = bind(this._onUpdate, this)
|
||||
this._onRemove = bind(this._onRemove, this)
|
||||
this._onAdd = this._onAdd.bind(this)
|
||||
this._onUpdate = this._onUpdate.bind(this)
|
||||
this._onRemove = this._onRemove.bind(this)
|
||||
|
||||
// Register listeners.
|
||||
this._collection.on(ACTION_ADD, this._onAdd)
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { BaseError } from 'make-error'
|
||||
import { isArray, iteratee } from 'lodash'
|
||||
import { iteratee } from 'lodash'
|
||||
|
||||
class XoError extends BaseError {
|
||||
constructor({ code, message, data }) {
|
||||
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
|
||||
}))
|
||||
|
||||
export const invalidParameters = create(10, (message, errors) => {
|
||||
if (isArray(message)) {
|
||||
if (Array.isArray(message)) {
|
||||
errors = message
|
||||
message = undefined
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
"@types/node": "^12.0.2",
|
||||
"@types/through2": "^2.0.31",
|
||||
"tslint": "^5.9.1",
|
||||
"tslint-config-standard": "^8.0.1",
|
||||
"tslint-config-standard": "^9.0.0",
|
||||
"typescript": "^3.1.6"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
# xo-remote-parser [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
Installation of the [npm package](https://npmjs.org/package/xo-remote-parser):
|
||||
|
||||
```
|
||||
> npm install --save ${pkg.name}
|
||||
> npm install --save xo-remote-parser
|
||||
```
|
||||
|
||||
## Usage
|
||||
@@ -40,10 +40,10 @@ the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](${pkg.bugs})
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
AGPL-3.0 © [Vates SAS](https://vates.fr)
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"deep-freeze": "^0.0.1",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-ldap",
|
||||
"version": "0.6.5",
|
||||
"version": "0.6.6",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "LDAP authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -39,14 +39,14 @@
|
||||
"inquirer": "^7.0.0",
|
||||
"ldapjs": "^1.0.1",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
"promise-toolbox": "^0.14.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/* eslint no-throw-literal: 0 */
|
||||
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import { bind, noop } from 'lodash'
|
||||
import noop from 'lodash/noop'
|
||||
import { createClient } from 'ldapjs'
|
||||
import { escape } from 'ldapjs/lib/filters/escape'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
@@ -9,6 +9,11 @@ import { readFile } from 'fs'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const DEFAULTS = {
|
||||
checkCertificate: true,
|
||||
filter: '(uid={{name}})',
|
||||
}
|
||||
|
||||
const VAR_RE = /\{\{([^}]+)\}\}/g
|
||||
const evalFilter = (filter, vars) =>
|
||||
filter.replace(VAR_RE, (_, name) => {
|
||||
@@ -43,7 +48,7 @@ If not specified, it will use a default set of well-known CAs.
|
||||
description:
|
||||
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
|
||||
type: 'boolean',
|
||||
default: true,
|
||||
defaults: DEFAULTS.checkCertificate,
|
||||
},
|
||||
bind: {
|
||||
description: 'Credentials to use before looking for the user record.',
|
||||
@@ -76,6 +81,11 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
|
||||
description: `
|
||||
Filter used to find the user.
|
||||
|
||||
For LDAP if you want to filter for a special group you can try
|
||||
something like:
|
||||
|
||||
- \`(&(uid={{name}})(memberOf=<group DN>))\`
|
||||
|
||||
For Microsoft Active Directory, you can try one of the following filters:
|
||||
|
||||
- \`(cn={{name}})\`
|
||||
@@ -83,13 +93,12 @@ For Microsoft Active Directory, you can try one of the following filters:
|
||||
- \`(sAMAccountName={{name}}@<domain>)\` (replace \`<domain>\` by your own domain)
|
||||
- \`(userPrincipalName={{name}})\`
|
||||
|
||||
For LDAP if you want to filter for a special group you can try
|
||||
something like:
|
||||
Or something like this if you also want to filter by group:
|
||||
|
||||
- \`(&(uid={{name}})(memberOf=<group DN>))\`
|
||||
- \`(&(sAMAccountName={{name}})(memberOf=<group DN>))\`
|
||||
`.trim(),
|
||||
type: 'string',
|
||||
default: '(uid={{name}})',
|
||||
default: DEFAULTS.filter,
|
||||
},
|
||||
},
|
||||
required: ['uri', 'base'],
|
||||
@@ -116,7 +125,7 @@ class AuthLdap {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
|
||||
this._authenticate = bind(this._authenticate, this)
|
||||
this._authenticate = this._authenticate.bind(this)
|
||||
}
|
||||
|
||||
async configure(conf) {
|
||||
@@ -127,7 +136,11 @@ class AuthLdap {
|
||||
})
|
||||
|
||||
{
|
||||
const { bind, checkCertificate = true, certificateAuthorities } = conf
|
||||
const {
|
||||
bind,
|
||||
checkCertificate = DEFAULTS.checkCertificate,
|
||||
certificateAuthorities,
|
||||
} = conf
|
||||
|
||||
if (bind) {
|
||||
clientOpts.bindDN = bind.dn
|
||||
@@ -147,7 +160,7 @@ class AuthLdap {
|
||||
const {
|
||||
bind: credentials,
|
||||
base: searchBase,
|
||||
filter: searchFilter = '(uid={{name}})',
|
||||
filter: searchFilter = DEFAULTS.filter,
|
||||
} = conf
|
||||
|
||||
this._credentials = credentials
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import execPromise from 'exec-promise'
|
||||
import { bind } from 'lodash'
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { readFile, writeFile } from 'fs'
|
||||
|
||||
@@ -17,7 +16,7 @@ const CACHE_FILE = './ldap.cache.conf'
|
||||
execPromise(async args => {
|
||||
const config = await promptSchema(
|
||||
configurationSchema,
|
||||
await fromCallback(cb => readFile(CACHE_FILE, 'utf-8', cb)).then(
|
||||
await fromCallback(readFile, CACHE_FILE, 'utf-8').then(
|
||||
JSON.parse,
|
||||
() => ({})
|
||||
)
|
||||
@@ -44,6 +43,6 @@ execPromise(async args => {
|
||||
}),
|
||||
password: await password('Password'),
|
||||
},
|
||||
bind(console.log, console)
|
||||
console.log.bind(console)
|
||||
)
|
||||
})
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-saml",
|
||||
"version": "0.6.0",
|
||||
"version": "0.7.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "SAML authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -40,7 +40,7 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -2,6 +2,10 @@ import { Strategy } from 'passport-saml'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const DEFAULTS = {
|
||||
disableRequestedAuthnContext: false,
|
||||
}
|
||||
|
||||
export const configurationSchema = {
|
||||
description:
|
||||
'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
|
||||
@@ -30,6 +34,11 @@ You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddr
|
||||
`,
|
||||
type: 'string',
|
||||
},
|
||||
disableRequestedAuthnContext: {
|
||||
title: "Don't request an authentication context",
|
||||
description: 'This is known to help when using Active Directory',
|
||||
default: DEFAULTS.disableRequestedAuthnContext,
|
||||
},
|
||||
},
|
||||
required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
|
||||
}
|
||||
@@ -46,6 +55,7 @@ class AuthSamlXoPlugin {
|
||||
configure({ usernameField, ...conf }) {
|
||||
this._usernameField = usernameField
|
||||
this._conf = {
|
||||
...DEFAULTS,
|
||||
...conf,
|
||||
|
||||
// must match the callback URL
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.16.2",
|
||||
"version": "0.16.4",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,6 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
@@ -48,7 +49,7 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -2,6 +2,7 @@ import createLogger from '@xen-orchestra/log'
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, groupBy, startCase } from 'lodash'
|
||||
import { get } from '@xen-orchestra/defined'
|
||||
import pkg from '../package'
|
||||
|
||||
const logger = createLogger('xo:xo-server-backup-reports')
|
||||
@@ -186,7 +187,7 @@ const MARKDOWN_BY_TYPE = {
|
||||
}
|
||||
|
||||
const getMarkdown = (task, props) =>
|
||||
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
|
||||
MARKDOWN_BY_TYPE[task.data?.type]?.(task, props)
|
||||
|
||||
const toMarkdown = parts => {
|
||||
const lines = []
|
||||
@@ -317,6 +318,7 @@ class BackupReportsXoPlugin {
|
||||
const taskMarkdown = await getMarkdown(task, {
|
||||
formatDate,
|
||||
jobName: log.jobName,
|
||||
xo,
|
||||
})
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
@@ -354,7 +356,7 @@ class BackupReportsXoPlugin {
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
success: log.status === 'success',
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
|
||||
@@ -364,9 +366,10 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
async _ngVmHandler(log, { name: jobName }, schedule, force) {
|
||||
async _ngVmHandler(log, { name: jobName, settings }, schedule, force) {
|
||||
const xo = this._xo
|
||||
|
||||
const mailReceivers = get(() => settings[''].reportRecipients)
|
||||
const { reportWhen, mode } = log.data || {}
|
||||
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
@@ -389,8 +392,9 @@ class BackupReportsXoPlugin {
|
||||
subject: `[Xen Orchestra] ${
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
mailReceivers,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: 2,
|
||||
success: false,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
})
|
||||
}
|
||||
@@ -642,11 +646,12 @@ class BackupReportsXoPlugin {
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
return this._sendReport({
|
||||
mailReceivers,
|
||||
markdown: toMarkdown(markdown),
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
success: log.status === 'success',
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
|
||||
@@ -656,12 +661,18 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
|
||||
_sendReport({
|
||||
mailReceivers = this._mailsReceivers,
|
||||
markdown,
|
||||
nagiosMarkdown,
|
||||
subject,
|
||||
success,
|
||||
}) {
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
xo.sendEmail({
|
||||
to: this._mailsReceivers,
|
||||
to: mailReceivers,
|
||||
subject,
|
||||
markdown,
|
||||
}),
|
||||
@@ -676,9 +687,14 @@ class BackupReportsXoPlugin {
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
status: nagiosStatus,
|
||||
status: success ? 0 : 2,
|
||||
message: nagiosMarkdown,
|
||||
}),
|
||||
xo.sendIcinga2Status !== undefined &&
|
||||
xo.sendIcinga2Status({
|
||||
status: success ? 'OK' : 'CRITICAL',
|
||||
message: markdown,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
@@ -708,7 +724,7 @@ class BackupReportsXoPlugin {
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
nagiosStatus: 2,
|
||||
success: false,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
|
||||
})
|
||||
}
|
||||
@@ -904,7 +920,7 @@ class BackupReportsXoPlugin {
|
||||
? ICON_FAILURE
|
||||
: ICON_SKIPPED
|
||||
}`,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
success: globalSuccess,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
|
||||
@@ -1,208 +0,0 @@
import Client, { createBackoff } from 'jsonrpc-websocket-client'
import hrp from 'http-request-plus'

const WS_URL = 'ws://localhost:9001'
const HTTP_URL = 'http://localhost:9002'

// ===================================================================

class XoServerCloud {
constructor({ xo }) {
this._xo = xo

// Defined in configure().
this._conf = null
this._key = null
}

configure(configuration) {
this._conf = configuration
}

async load() {
const getResourceCatalog = this._getCatalog.bind(this)
getResourceCatalog.description =
"Get the list of user's available resources"
getResourceCatalog.permission = 'admin'
getResourceCatalog.params = {
filters: { type: 'object', optional: true },
}

const registerResource = ({ namespace }) =>
this._registerResource(namespace)
registerResource.description = 'Register a resource via cloud plugin'
registerResource.params = {
namespace: {
type: 'string',
},
}
registerResource.permission = 'admin'

const downloadAndInstallResource = this._downloadAndInstallResource.bind(
this
)

downloadAndInstallResource.description =
'Download and install a resource via cloud plugin'

downloadAndInstallResource.params = {
id: { type: 'string' },
namespace: { type: 'string' },
version: { type: 'string' },
sr: { type: 'string' },
}

downloadAndInstallResource.resolve = {
sr: ['sr', 'SR', 'administrate'],
}

downloadAndInstallResource.permission = 'admin'

this._unsetApiMethods = this._xo.addApiMethods({
cloud: {
downloadAndInstallResource,
getResourceCatalog,
registerResource,
},
})
this._unsetRequestResource = this._xo.defineProperty(
'requestResource',
this._requestResource,
this
)

const updater = (this._updater = new Client(WS_URL))
const connect = () =>
updater.open(createBackoff()).catch(error => {
console.error('xo-server-cloud: fail to connect to updater', error)

return connect()
})
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
console.warn('xo-server-cloud: next attempt in %s ms', delay)
})
connect()
}

unload() {
this._unsetApiMethods()
this._unsetRequestResource()
}

// ----------------------------------------------------------------

async _getCatalog({ filters } = {}) {
const catalog = await this._updater.call('getResourceCatalog', { filters })

if (!catalog) {
throw new Error('cannot get catalog')
}

return catalog
}

// ----------------------------------------------------------------

async _getNamespaces() {
const catalog = await this._getCatalog()

if (!catalog._namespaces) {
throw new Error('cannot get namespaces')
}

return catalog._namespaces
}

// ----------------------------------------------------------------

async _downloadAndInstallResource({ id, namespace, sr, version }) {
const stream = await this._requestResource({
hub: true,
id,
namespace,
version,
})
const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
srId: sr.id,
type: 'xva',
})
await vm.update_other_config({
'xo:resource:namespace': namespace,
'xo:resource:xva:version': version,
'xo:resource:xva:id': id,
})
}

// ----------------------------------------------------------------

async _registerResource(namespace) {
const _namespace = (await this._getNamespaces())[namespace]

if (_namespace === undefined) {
throw new Error(`${namespace} is not available`)
}

if (_namespace.registered || _namespace.pending) {
throw new Error(`already registered for ${namespace}`)
}

return this._updater.call('registerResource', { namespace })
}

// ----------------------------------------------------------------

async _getNamespaceCatalog({ hub, namespace }) {
const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
namespace
]

if (!namespaceCatalog) {
throw new Error(`cannot get catalog: ${namespace} not registered`)
}

return namespaceCatalog
}

// ----------------------------------------------------------------

async _requestResource({ hub = false, id, namespace, version }) {
const _namespace = (await this._getNamespaces())[namespace]

if (!hub && (!_namespace || !_namespace.registered)) {
throw new Error(`cannot get resource: ${namespace} not registered`)
}

const { _token: token } = await this._getNamespaceCatalog({
hub,
namespace,
})

// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
if (token === undefined) {
throw new Error(`${namespace} namespace token is undefined`)
}

const downloadToken = await this._updater.call('getResourceDownloadToken', {
token,
id,
version,
})

if (!downloadToken) {
throw new Error('cannot get download token')
}

const response = await hrp(HTTP_URL, {
headers: {
Authorization: `Bearer ${downloadToken}`,
},
})

// currently needed for XenApi#putResource()
response.length = response.headers['content-length']

return response
}
}

export default opts => new XoServerCloud(opts)
@@ -31,7 +31,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/cron": "^1.0.6",
"lodash": "^4.16.2"
},
"devDependencies": {

@@ -21,7 +21,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/cron": "^1.0.6",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"
@@ -32,7 +32,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {

@@ -1,6 +1,6 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { assign, forOwn, map, mean } from 'lodash'
import { forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'

const COMPARATOR_FN = {
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
assign(result, {
Object.assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
definition.alarmTriggerLevel
)
const data = getter(result.object)
assign(result, {
Object.assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
@@ -680,7 +680,7 @@ ${entry.listItem}
},
}
if (xapiObject.$type === 'VM') {
payload['vm_uuid'] = xapiObject.uuid
payload.vm_uuid = xapiObject.uuid
}
// JSON is not well formed, can't use the default node parser
return JSON5.parse(

@@ -15,9 +15,9 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.3.0",
"version": "0.3.1",
"engines": {
"node": ">=6"
"node": ">=8.10"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
@@ -25,13 +25,13 @@
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^5.2.0"
"cross-env": "^6.0.3"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.97",
"promise-toolbox": "^0.13.0",
"node-openssl-cert": "^0.0.101",
"promise-toolbox": "^0.14.0",
"uuid": "^3.3.2"
},
"private": true
File diff suppressed because it is too large
@@ -0,0 +1,202 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'

// =============================================================================

const log = createLogger('xo:xo-server:sdn-controller:private-network')

// =============================================================================

const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
Array.from({ length: 16 }, _ => sample(CHARS)).join('')

// =============================================================================

export class PrivateNetwork {
constructor(controller, uuid) {
this.controller = controller
this.uuid = uuid
this.networks = {}
}

// ---------------------------------------------------------------------------

async addHost(host) {
if (host.$ref === this.center?.$ref) {
// Nothing to do
return
}

const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
log.error('No OVSDB client found', {
host: host.name_label,
pool: host.$pool.name_label,
})
return
}

const centerClient = this.controller.ovsdbClients[this.center.$ref]
if (centerClient === undefined) {
log.error('No OVSDB client found for star-center', {
privateNetwork: this.uuid,
host: this.center.name_label,
pool: this.center.$pool.name_label,
})
return
}

const network = this.networks[host.$pool.uuid]
const centerNetwork = this.networks[this.center.$pool.uuid]
const otherConfig = network.other_config
const encapsulation =
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
const password =
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined

let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
])
} catch (error) {
log.error('Error while connecting host to private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
log.info('Host added', {
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})

return bridgeName
}

addNetwork(network) {
this.networks[network.$pool.uuid] = network
log.info('Adding network', {
privateNetwork: this.uuid,
network: network.name_label,
pool: network.$pool.name_label,
})
if (this.center === undefined) {
return this.electNewCenter()
}

const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
return Promise.all(
map(hosts, async host => {
const hostClient = this.controller.ovsdbClients[host.$ref]
const network = this.networks[host.$pool.uuid]
await hostClient.resetForNetwork(network, this.uuid)
await this.addHost(host)
})
)
}

async electNewCenter() {
delete this.center

// TODO: make it random
const hosts = this._getHosts()
for (const host of hosts) {
const pif = find(host.$PIFs, {
network: this.networks[host.$pool.uuid].$ref,
})
if (pif?.currently_attached && host.$metrics.live) {
this.center = host
break
}
}

if (this.center === undefined) {
log.error('No available host to elect new star-center', {
privateNetwork: this.uuid,
})
return
}

await this._reset()

// Recreate star topology
await Promise.all(map(hosts, host => this.addHost(host)))

log.info('New star-center elected', {
center: this.center.name_label,
privateNetwork: this.uuid,
})
}

// ---------------------------------------------------------------------------

getPools() {
const pools = []
forOwn(this.networks, network => {
pools.push(network.$pool)
})
return pools
}

// ---------------------------------------------------------------------------

_reset() {
return Promise.all(
map(this._getHosts(), async host => {
// Clean old ports and interfaces
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
return
}

const network = this.networks[host.$pool.uuid]
try {
await hostClient.resetForNetwork(network, this.uuid)
} catch (error) {
log.error('Error while resetting private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: network.$pool.name_label,
})
}
})
)
}

// ---------------------------------------------------------------------------

_getHosts() {
const hosts = []
forOwn(this.networks, network => {
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
})
return hosts
}
}
@@ -28,8 +28,7 @@ export class OvsdbClient {

Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
- `other_config`:
- `xo:sdn-controller:cross-pool` : UUID of the remote network connected by the tunnel
- `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller
- `xo:sdn-controller:private-network-uuid`: UUID of the private network

Attributes on created OVS interfaces:
- `options`:
@@ -67,55 +66,49 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------

async addInterfaceAndPort(
networkUuid,
networkName,
network,
remoteAddress,
encapsulation,
key,
password,
remoteNetwork
privateNetworkUuid
) {
if (
this._adding.find(
elem => elem.id === networkUuid && elem.addr === remoteAddress
elem => elem.id === network.uuid && elem.addr === remoteAddress
) !== undefined
) {
return
}
const adding = { id: networkUuid, addr: remoteAddress }
const adding = { id: network.uuid, addr: remoteAddress }
this._adding.push(adding)

const socket = await this._connect()
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
return
}

const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
bridge,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
return bridgeName
return bridge.name
}

const index = ++this._numberOfPortAndInterface
const interfaceName = bridgeName + '_iface' + index
const portName = bridgeName + '_port' + index
const interfaceName = bridge.name + '_iface' + index
const portName = bridge.name + '_port' + index

// Add interface and port to the bridge
const options = { remote_ip: remoteAddress, key: key }
@@ -139,11 +132,9 @@ export class OvsdbClient {
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: toMap(
remoteNetwork !== undefined
? { 'xo:sdn-controller:cross-pool': remoteNetwork }
: { 'xo:sdn-controller:private-pool-wide': 'true' }
),
other_config: toMap({
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
}),
},
'uuid-name': 'new_port',
}
@@ -151,7 +142,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
@@ -163,7 +154,7 @@ export class OvsdbClient {
const jsonObjects = await this._sendOvsdbTransaction(params, socket)

this._adding = this._adding.filter(
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
if (jsonObjects === undefined) {
socket.destroy()
@@ -189,8 +180,8 @@ export class OvsdbClient {
details,
port: portName,
interface: interfaceName,
bridge: bridgeName,
network: networkName,
bridge: bridge.name,
network: network.name_label,
host: this.host.name_label,
})
socket.destroy()
@@ -200,33 +191,24 @@ export class OvsdbClient {
log.debug('Port and interface added to bridge', {
port: portName,
interface: interfaceName,
bridge: bridgeName,
network: networkName,
bridge: bridge.name,
network: network.name_label,
host: this.host.name_label,
})
socket.destroy()
return bridgeName
return bridge.name
}

async resetForNetwork(
networkUuid,
networkName,
crossPoolOnly,
remoteNetwork
) {
async resetForNetwork(network, privateNetworkUuid) {
const socket = await this._connect()
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
socket.destroy()
return
}

// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
const ports = await this._getBridgePorts(bridge, socket)
if (ports === undefined) {
socket.destroy()
return
@@ -250,15 +232,14 @@ export class OvsdbClient {
// 2019-09-03
// Compatibility code, to be removed in 1 year.
const oldShouldDelete =
(config[0] === 'private_pool_wide' && !crossPoolOnly) ||
(config[0] === 'cross_pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
config[0] === 'private_pool_wide' ||
config[0] === 'cross_pool' ||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
config[0] === 'xo:sdn-controller:cross-pool'

const shouldDelete =
(config[0] === 'xo:sdn-controller:private-pool-wide' &&
!crossPoolOnly) ||
(config[0] === 'xo:sdn-controller:cross-pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
config[0] === 'xo:sdn-controller:private-network-uuid' &&
config[1] === privateNetworkUuid

if (shouldDelete || oldShouldDelete) {
portsToDelete.push(['uuid', portUuid])
@@ -275,7 +256,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}

@@ -288,7 +269,7 @@ export class OvsdbClient {
if (jsonObjects[0].error != null) {
log.error('Error while deleting ports from bridge', {
error: jsonObjects[0].error,
bridge: bridgeName,
bridge: bridge.name,
host: this.host.name_label,
})
socket.destroy()
@@ -297,7 +278,7 @@ export class OvsdbClient {

log.debug('Ports deleted from bridge', {
nPorts: jsonObjects[0].result[0].count,
bridge: bridgeName,
bridge: bridge.name,
host: this.host.name_label,
})
socket.destroy()
@@ -335,9 +316,9 @@ export class OvsdbClient {

// ---------------------------------------------------------------------------

async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
async _getBridgeForNetwork(network, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
]
const selectResult = await this._select(
'Bridge',
@@ -347,25 +328,17 @@ export class OvsdbClient {
)
if (selectResult === undefined) {
log.error('No bridge found for network', {
network: networkName,
network: network.name_label,
host: this.host.name_label,
})
return []
return {}
}

const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name

return [bridgeUuid, bridgeName]
return { uuid: selectResult._uuid[1], name: selectResult.name }
}

async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
const ports = await this._getBridgePorts(bridge, socket)
if (ports === undefined) {
return false
}
@@ -393,8 +366,8 @@ export class OvsdbClient {
return false
}

async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
async _getBridgePorts(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult === undefined) {
return
@@ -8,5 +8,8 @@
"directory": "packages/xo-server-test-plugin",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": "*"
}
}

@@ -36,7 +36,7 @@
"golike-defer": "^0.4.1",
"jest": "^24.8.0",
"lodash": "^4.17.11",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.14.0",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-lib": "^0.9.0"

@@ -14,6 +14,7 @@

[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''

[templates]
packages/xo-server-test/src/_defaultValues.js (new file, 6 lines)
@@ -0,0 +1,6 @@
export const getDefaultName = () => `xo-server-test ${new Date().toISOString()}`

export const getDefaultSchedule = () => ({
name: getDefaultName(),
cron: '0 * * * * *',
})
@@ -2,15 +2,11 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { find, forOwn } from 'lodash'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { fromEvent } from 'promise-toolbox'

import config from './_config'

const getDefaultCredentials = () => {
const { email, password } = config.xoConnection
return { email, password }
}
import { getDefaultName } from './_defaultValues'

class XoConnection extends Xo {
constructor(opts) {
@@ -72,7 +68,10 @@ class XoConnection extends Xo {
}

@defer
async connect($defer, credentials = getDefaultCredentials()) {
async connect(
$defer,
credentials = pick(config.xoConnection, 'email', 'password')
) {
await this.open()
$defer.onFailure(() => this.close())

@@ -111,9 +110,26 @@ class XoConnection extends Xo {
}

async createTempBackupNgJob(params) {
const job = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
return job
// mutate and inject default values
defaultsDeep(params, {
mode: 'full',
name: getDefaultName(),
settings: {
'': {
// it must be enabled because the XAPI might be not able to coalesce VDIs
// as fast as the tests run
//
// see https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
bypassVdiChainsCheck: true,

// it must be 'never' to avoid race conditions with the plugin `backup-reports`
reportWhen: 'never',
},
},
})
const id = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id })
return this.call('backupNg.getJob', { id })
}

async createTempNetwork(params) {
@@ -128,7 +144,7 @@ class XoConnection extends Xo {

async createTempVm(params) {
const id = await this.call('vm.create', {
name_label: 'XO Test',
name_label: getDefaultName(),
template: config.templates.templateWithoutDisks,
...params,
})
@@ -138,6 +154,19 @@ class XoConnection extends Xo {
})
}

async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })
return this.waitObjectState(id, vm => {
if (
vm.power_state !== 'Running' ||
(withXenTools && vm.xenTools === false)
) {
throw new Error('retry')
}
})
}

async createTempRemote(params) {
const remote = await this.call('remote.create', params)
this._tempResourceDisposers.push('remote.delete', { id: remote.id })
@@ -1,61 +1,6 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Any<Object>,
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;

exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
Object {
"cron": "0 * * * * *",
"enabled": false,
"id": Any<String>,
"jobId": Any<String>,
"name": "scheduleTest",
}
`;

exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Object {
"": Object {
"reportWhen": "never",
},
},
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;

exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "skipped",
}
`;

exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -92,23 +37,6 @@ Array [
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;

exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "failure",
}
`;

exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -127,23 +55,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -157,7 +69,69 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg create and execute backup with enabled offline backup 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg create and execute backup with enabled offline backup 4`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg create and execute backup with enabled offline backup 5`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -168,7 +142,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -183,6 +157,19 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
Object {
"end": Any<Number>,
@@ -197,19 +184,6 @@ Object {
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -224,6 +198,19 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
Object {
"end": Any<Number>,
@@ -238,35 +225,6 @@ Object {
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -280,7 +238,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -291,7 +249,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -306,7 +264,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -319,6 +277,34 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
Object {
"end": Any<Number>,
@@ -334,62 +320,18 @@ Object {

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -403,7 +345,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -414,6 +356,47 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
Object {
"data": Object {
@@ -455,65 +438,7 @@ Object {
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 25`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 26`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 27`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;

exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -524,7 +449,7 @@ Object {
}
`;

exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -6,20 +6,44 @@ import { noSuchObject } from 'xo-common/api-errors'
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo from '../_xoConnection'
|
||||
import { getDefaultName, getDefaultSchedule } from '../_defaultValues'
|
||||
|
||||
const DEFAULT_SCHEDULE = {
|
||||
name: 'scheduleTest',
|
||||
cron: '0 * * * * *',
|
||||
const validateBackupJob = (jobInput, jobOutput, createdSchedule) => {
|
||||
const expectedObj = {
|
||||
id: expect.any(String),
|
||||
mode: jobInput.mode,
|
||||
name: jobInput.name,
|
||||
type: 'backup',
|
||||
settings: {
|
||||
'': jobInput.settings[''],
|
||||
},
|
||||
userId: xo._user.id,
|
||||
vms: jobInput.vms,
|
||||
}
|
||||
|
||||
const schedules = jobInput.schedules
|
||||
if (schedules !== undefined) {
|
||||
const scheduleTmpId = Object.keys(schedules)[0]
|
||||
expect(createdSchedule).toEqual({
|
||||
...schedules[scheduleTmpId],
|
||||
enabled: false,
|
||||
id: expect.any(String),
|
||||
jobId: jobOutput.id,
|
||||
})
|
||||
|
||||
expectedObj.settings[createdSchedule.id] = jobInput.settings[scheduleTmpId]
|
||||
}
|
||||
|
||||
expect(jobOutput).toEqual(expectedObj)
|
||||
}
|
||||
|
||||
const validateRootTask = (log, props) =>
|
||||
expect(log).toMatchSnapshot({
|
||||
const validateRootTask = (log, expected) =>
|
||||
expect(log).toEqual({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
message: 'backup',
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
...expected,
|
||||
})
|
||||
|
||||
const validateVmTask = (task, vmId, props) => {
|
||||
@@ -66,88 +90,55 @@ const validateOperationTask = (task, props) => {
|
||||
})
|
||||
}
|
||||
|
||||
// Note: `bypassVdiChainsCheck` must be enabled because the XAPI might be not
|
||||
// able to coalesce VDIs as fast as the tests run.
|
||||
//
|
||||
// See https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
|
||||
describe('backupNg', () => {
|
||||
let defaultBackupNg
|
||||
|
||||
beforeAll(() => {
|
||||
defaultBackupNg = {
|
||||
name: 'default-backupNg',
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
reportWhen: 'never',
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.createJob() :', () => {
|
||||
it('creates a new backup job without schedules', async () => {
|
||||
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
expect(backupNg).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNg.userId).toBe(xo._user.id)
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(jobInput, jobOutput)
|
||||
})
|
||||
|
||||
it('creates a new backup job with schedules', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
|
||||
|
||||
expect(backupNgJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
userId: expect.any(String),
|
||||
settings: expect.any(Object),
|
||||
vms: expect.any(Object),
|
||||
})
|
||||
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
|
||||
expect(backupNgJob.userId).toBe(xo._user.id)
|
||||
|
||||
expect(Object.keys(backupNgJob.settings).length).toBe(2)
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
expect(backupNgJob.settings[schedule.id]).toEqual({
|
||||
snapshotRetention: 1,
|
||||
})
|
||||
|
||||
expect(schedule).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
})
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(
|
||||
jobInput,
|
||||
jobOutput,
|
||||
await xo.getSchedule({ jobId: jobOutput.id })
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a backup job', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.call('backupNg.createJob', {
|
||||
...defaultBackupNg,
|
||||
const jobId = await xo.call('backupNg.createJob', {
|
||||
mode: 'full',
|
||||
name: getDefaultName(),
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
@@ -173,16 +164,19 @@ describe('backupNg', () => {
|
||||
|
||||
describe('.runJob() :', () => {
|
||||
it('fails trying to run a backup job without schedule', async () => {
|
||||
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
|
||||
const { id } = await xo.createTempBackupNgJob({
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
})
|
||||
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with no matching VMs', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
@@ -205,9 +199,8 @@ describe('backupNg', () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
@@ -231,25 +224,23 @@ describe('backupNg', () => {
|
||||
jest.setTimeout(8e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: vmIdWithoutDisks } = await xo.createTempVm({
|
||||
name_label: 'XO Test Without Disks',
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.templateWithoutDisks,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: vmIdWithoutDisks,
|
||||
},
|
||||
})
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -264,12 +255,16 @@ describe('backupNg', () => {
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'skipped',
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
@@ -293,22 +288,24 @@ describe('backupNg', () => {
|
||||
const scheduleTempId = randomId()
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
...defaultBackupNg,
|
||||
const jobInput = {
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: DEFAULT_SCHEDULE,
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
...defaultBackupNg.settings,
|
||||
[scheduleTempId]: {},
|
||||
},
|
||||
srs: {
|
||||
id: config.srs.default,
|
||||
},
|
||||
})
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
@@ -324,12 +321,15 @@ describe('backupNg', () => {
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
expect(log).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
jobId: expect.any(String),
|
||||
scheduleId: expect.any(String),
|
||||
start: expect.any(Number),
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'failure',
})

expect(task).toMatchSnapshot({
@@ -352,7 +352,6 @@ describe('backupNg', () => {
jest.setTimeout(6e4)
await xo.createTempServer(config.servers.default)
let vm = await xo.createTempVm({
name_label: 'XO Test Temp',
name_description: 'Creating a temporary vm',
template: config.templates.default,
VDIs: [
@@ -365,22 +364,18 @@ describe('backupNg', () => {
})

const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
const jobInput = {
vms: {
id: vm.id,
},
schedules: {
[scheduleTempId]: DEFAULT_SCHEDULE,
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
bypassVdiChainsCheck: true,
reportWhen: 'never',
},
[scheduleTempId]: { snapshotRetention: 2 },
},
})
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)

const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -420,12 +415,15 @@ describe('backupNg', () => {
scheduleId: schedule.id,
})

expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'success',
})

const subTaskSnapshot = subTasks.find(
@@ -470,7 +468,7 @@ describe('backupNg', () => {
const exportRetention = 2
const fullInterval = 2
const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
const jobInput = {
mode: 'delta',
remotes: {
id: {
@@ -478,13 +476,11 @@ describe('backupNg', () => {
},
},
schedules: {
[scheduleTempId]: DEFAULT_SCHEDULE,
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
bypassVdiChainsCheck: true,
fullInterval,
reportWhen: 'never',
},
[remoteId1]: { deleteFirst: true },
[scheduleTempId]: { exportRetention },
@@ -492,7 +488,8 @@ describe('backupNg', () => {
vms: {
id: vmToBackup,
},
})
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)

const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -515,10 +512,12 @@ describe('backupNg', () => {
backupLogs.forEach(({ tasks = [], ...log }, key) => {
validateRootTask(log, {
data: {
mode: 'delta',
reportWhen: 'never',
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
message: 'backup',
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'success',
})

@@ -585,4 +584,110 @@ describe('backupNg', () => {
})
})
})

test('create and execute backup with enabled offline backup', async () => {
const vm = xo.objects.all[config.vms.withOsAndXenTools]
if (vm.power_state !== 'Running') {
await xo.startTempVm(vm.id, { force: true }, true)
}

const scheduleTempId = randomId()
const srId = config.srs.default
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const backupInput = {
mode: 'full',
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
offlineBackup: true,
},
[scheduleTempId]: {
copyRetention: 1,
exportRetention: 1,
},
},
srs: {
id: srId,
},
vms: {
id: vm.id,
},
}
const backup = await xo.createTempBackupNgJob(backupInput)
expect(backup.settings[''].offlineBackup).toBe(true)

const schedule = await xo.getSchedule({ jobId: backup.id })

await Promise.all([
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Halted') {
throw new Error('retry')
}
}),
])

await xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Running') {
throw new Error('retry')
}
})

const backupLogs = await xo.getBackupLogs({
jobId: backup.id,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(1)

const { tasks, ...log } = backupLogs[0]
validateRootTask(log, {
data: {
mode: backupInput.mode,
reportWhen: backupInput.settings[''].reportWhen,
},
jobId: backup.id,
jobName: backupInput.name,
scheduleId: schedule.id,
status: 'success',
})

expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...vmTask }) => {
validateVmTask(vmTask, vm.id, { status: 'success' })

expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...subTask }) => {
expect(subTask.message).not.toBe('snapshot')

if (subTask.message === 'export') {
validateExportTask(
subTask,
subTask.data.type === 'remote' ? remoteId : srId,
{
data: expect.any(Object),
status: 'success',
}
)

expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(operationTask => {
if (
operationTask.message === 'transfer' ||
operationTask.message === 'merge'
) {
validateOperationTask(operationTask, {
result: { size: expect.any(Number) },
status: 'success',
})
}
})
}
})
})
}, 200e3)
})
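The hunks above all follow one refactoring pattern: the inline argument to createTempBackupNgJob becomes a named jobInput object, so the later assertions can read mode and reportWhen from it instead of repeating the literals. A minimal sketch of the idea, assuming Jest; the name and log values here are made up, standing in for what xo.getBackupLogs() returns:

/* eslint-env jest */

// The job definition lives in a single object...
const jobInput = {
  mode: 'delta',
  name: 'test job', // hypothetical value
  settings: { '': { reportWhen: 'never' } },
}

// Hypothetical stand-in for an entry returned by xo.getBackupLogs():
const log = {
  data: { mode: 'delta', reportWhen: 'never' },
  jobName: 'test job',
}

// ...and the assertions reuse its fields, so each literal appears only once:
test('the log matches the job definition', () => {
  expect(log.data.mode).toBe(jobInput.mode)
  expect(log.data.reportWhen).toBe(jobInput.settings[''].reportWhen)
  expect(log.jobName).toBe(jobInput.name)
})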
@@ -6,7 +6,7 @@ import expect from 'must'
// ===================================================================

import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map, assign } from 'lodash'
import { map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================
@@ -27,7 +27,7 @@ describe('disk', () => {
const config = await getConfig()
serverId = await xo.call(
'server.add',
assign({ autoConnect: false }, config.xenServer1)
Object.assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')
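This hunk and the following ones swap lodash's assign for the native Object.assign, which lets the lodash import shrink. For these calls the two are interchangeable; a minimal sketch of the equivalence, with made-up config values:

import { assign } from 'lodash'

const defaults = { autoConnect: false }
const xenServer1 = { host: 'xen1.example.org' } // hypothetical config value

// Both merge the sources into the first argument and return it, so for a
// shallow merge like this one the results are identical:
const viaLodash = assign({}, defaults, xenServer1) // removed by this diff
const viaNative = Object.assign({}, defaults, xenServer1) // replacement
// Both equal { autoConnect: false, host: 'xen1.example.org' }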
@@ -1,6 +1,6 @@
/* eslint-env jest */

import { assign, find, map } from 'lodash'
import { find, map } from 'lodash'

import { config, rejectionOf, xo } from './util'

@@ -60,14 +60,16 @@ describe('server', () => {
autoConnect: false,
})
expect(
(await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)).message
(
await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)
).message
).toBe('unknown error from the peer')
})

@@ -151,7 +153,7 @@ describe('server', () => {

it('connects to a Xen server', async () => {
const serverId = await addServer(
assign({ autoConnect: false }, config.xenServer1)
Object.assign({ autoConnect: false }, config.xenServer1)
)

await xo.call('server.connect', {
@@ -184,7 +186,7 @@ describe('server', () => {
let serverId
beforeEach(async () => {
serverId = await addServer(
assign({ autoConnect: false }, config.xenServer1)
Object.assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
id: serverId,
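The reworked expect(...) blocks above are a pure re-indentation of the same expression: (await rejectionOf(p)).message becomes a multi-line form with the await on its own line, with no behavioral change. rejectionOf itself is imported from ./util and its implementation is not part of this diff; a helper with the behavior these tests rely on could look like the following sketch (shape assumed):

// Resolves with the rejection reason; rejects with the value if the
// promise unexpectedly fulfills, so a passing test proves rejection.
const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    reason => reason
  )

// Usage matching the tests above:
// const error = await rejectionOf(addServer({ /* ... */ }))
// expect(error.message).toBe('unknown error from the peer')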
@@ -12,7 +12,7 @@ import {
getOneHost,
waitObjectState,
} from './util'
import { assign, map } from 'lodash'
import { map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================
@@ -33,7 +33,7 @@ describe('vbd', () => {

serverId = await xo.call(
'server.add',
assign({ autoConnect: false }, config.xenServer1)
Object.assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

@@ -60,13 +60,15 @@ describe('cd', () => {
await getOrWaitCdVbdPosition(vmId)

expect(
(await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)).message
(
await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)
).message
).toBe('unknown error from the peer')
})
@@ -126,12 +126,14 @@ describe('the VM life cyle', () => {
})

expect(
(await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)).message
(
await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
).toBe('VM lacks feature shutdown')
})

@@ -196,12 +198,14 @@ describe('the VM life cyle', () => {
})

expect(
(await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)).message
(
await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
).toBe('clean shutdown requires PV drivers')
})
@@ -34,14 +34,14 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.14.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {