Compare commits


2 Commits

Author          SHA1        Message                                                                Date
Pierre Donias   1efe1d82cf  listMissingPatchesFailed                                               2019-10-14 15:59:38 +02:00
Pierre Donias   c4b4ee6476  fix(xo-server,xo-web,xo-common): list missing patches error handling   2019-10-14 15:59:37 +02:00
242 changed files with 6778 additions and 8424 deletions

View File

@@ -8,8 +8,5 @@
"directory": "@xen-orchestra/babel-config",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=6"
}
}

View File

@@ -1,32 +0,0 @@
const getopts = require('getopts')
const { version } = require('./package.json')
module.exports = commands =>
async function(args, prefix) {
const opts = getopts(args, {
alias: {
help: 'h',
},
boolean: ['help'],
stopEarly: true,
})
const commandName = opts.help || args.length === 0 ? 'help' : args[0]
const command = commands[commandName]
if (command === undefined) {
process.stdout.write(`Usage:
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${prefix} ${command} ${commands[command].usage || ''}`)
.join('\n\n')}
xo-backups v${version}
`)
process.exitCode = commandName === 'help' ? 0 : 1
return
}
return command.main(args.slice(1), prefix + ' ' + commandName)
}

View File

@@ -1,393 +0,0 @@
#!/usr/bin/env node
// assigned when options are parsed by the main function
let force
// -----------------------------------------------------------------------------
const assert = require('assert')
const getopts = require('getopts')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')
const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
// -----------------------------------------------------------------------------
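// note: asyncMap is curried (curryRight), so it can be called directly as
// asyncMap(iterable, fn) or partially applied as asyncMap(fn), as done in the
// `pipe` chains below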
const asyncMap = curryRight((iterable, fn) =>
Promise.all(
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
)
)
const filter = (...args) => thisArg => thisArg.filter(...args)
const isGzipFile = async fd => {
// https://tools.ietf.org/html/rfc1952.html#page-5
const magicNumber = Buffer.allocUnsafe(2)
assert.strictEqual(
await fs.read(fd, magicNumber, 0, magicNumber.length, 0),
magicNumber.length
)
return magicNumber[0] === 31 && magicNumber[1] === 139
}
// TODO: better check?
//
// our heuristic is not good enough, there have been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async (size, fd) => {
if (size <= 1024 || size % 512 !== 0) {
return false
}
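// a valid tar archive ends with two 512-byte blocks of zeros, so the last
// 1024 bytes of the file must be zero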
const buf = Buffer.allocUnsafe(1024)
assert.strictEqual(
await fs.read(fd, buf, 0, buf.length, size - buf.length),
buf.length
)
return buf.every(_ => _ === 0)
}
// TODO: find a heuristic for compressed files
const isValidXva = async path => {
try {
const fd = await fs.open(path, 'r')
try {
const { size } = await fs.fstat(fd)
if (size < 20) {
// neither a valid gzip nor tar
return false
}
return (await isGzipFile(fd))
? true // gzip files cannot be validated at this time
: await isValidTar(size, fd)
} finally {
fs.close(fd).catch(noop)
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}
const noop = Function.prototype
const readDir = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
entries[i] = `${path}/${entry}`
})
return entries
},
error => {
// a missing dir is by definition empty
if (error != null && error.code === 'ENOENT') {
return []
}
throw error
}
)
// -----------------------------------------------------------------------------
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
async function mergeVhdChain(chain) {
assert(chain.length >= 2)
const child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
console.warn('Unused parents of VHD', child)
chain
.slice(1)
.reverse()
.forEach(parent => {
console.warn(' ', parent)
})
force && console.warn(' merging…')
console.warn('')
if (force) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
// handler,
// parent,
// handler,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children)
// )
}
await Promise.all([
force && fs.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
console.warn('Unused VHD', child)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(child)
}),
])
}
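// lists all VHD files of a VM backup directory, i.e. every *.vhd two levels
// below <vmDir>/vdis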
const listVhds = pipe([
vmDir => vmDir + '/vdis',
readDir,
asyncMap(readDir),
flatten,
asyncMap(readDir),
flatten,
filter(_ => _.endsWith('.vhd')),
])
async function handleVm(vmDir) {
const vhds = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
// remove broken VHDs
await asyncMap(await listVhds(vmDir), async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error(
'this script does not support multiple VHD children'
)
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
console.warn('Error while checking VHD', path)
console.warn(' ', error)
if (error != null && error.code === 'ERR_ASSERTION') {
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(path))
}
}
})
// remove VHDs with missing ancestors
{
const deletions = []
// deletes the VHD if any of its ancestors is missing
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhd]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhd)
console.warn('Error while checking VHD', vhd)
console.warn(' missing parent', parent)
force && console.warn(' deleting…')
console.warn('')
force && deletions.push(handler.unlink(vhd))
}
}
// > A property that is deleted before it has been visited will not be
// > visited later.
// >
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
for (const child in vhdParents) {
deleteIfOrphan(child)
}
await Promise.all(deletions)
}
const [jsons, xvas] = await readDir(vmDir).then(entries => [
entries.filter(_ => _.endsWith('.json')),
new Set(entries.filter(_ => _.endsWith('.xva'))),
])
await asyncMap(xvas, async path => {
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await isValidXva(path))) {
console.warn('Potential broken XVA', path)
console.warn('')
}
})
const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await fs.readFile(json))
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve(vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
console.warn('Error while checking backup', json)
console.warn(' missing file', linkedXva)
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
})()
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
} else {
console.warn('Error while checking backup', json)
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
console.warn(
' %i/%i missing VHDs',
missingVhds.length,
linkedVhds.length
)
missingVhds.forEach(vhd => {
console.warn(' ', vhd)
})
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
}
})
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
const toCheck = new Set(unusedVhds)
const getUsedChildChainOrDelete = vhd => {
if (vhd in vhdChainsToMerge) {
const chain = vhdChainsToMerge[vhd]
delete vhdChainsToMerge[vhd]
return chain
}
if (!unusedVhds.has(vhd)) {
return [vhd]
}
// no longer needs to be checked
toCheck.delete(vhd)
const child = vhdChildren[vhd]
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
return chain
}
}
console.warn('Unused VHD', vhd)
force && console.warn(' deleting…')
console.warn('')
force && unusedVhdsDeletion.push(handler.unlink(vhd))
}
toCheck.forEach(vhd => {
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
})
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain))
}
})
}
await Promise.all([
unusedVhdsDeletion,
asyncMap(unusedXvas, path => {
console.warn('Unused XVA', path)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(path)
}),
])
}
// -----------------------------------------------------------------------------
module.exports = async function main(args) {
const opts = getopts(args, {
alias: {
force: 'f',
},
boolean: ['force'],
default: {
force: false,
},
})
;({ force } = opts)
await asyncMap(opts._, async vmDir => {
vmDir = resolve(vmDir)
// TODO: implement this in `xo-server`, not easy because not compatible with
// `@xen-orchestra/fs`.
const release = await lockfile.lock(vmDir)
try {
await handleVm(vmDir)
} catch (error) {
console.error('handleVm', vmDir, error)
} finally {
await release()
}
})
}

View File

@@ -1,13 +1,378 @@
#!/usr/bin/env node
require('./_composeCommands')({
'clean-vms': {
get main() {
return require('./commands/clean-vms')
const args = process.argv.slice(2)
if (
args.length === 0 ||
/^(?:-h|--help)$/.test(args[0]) ||
args[0] !== 'clean-vms'
) {
console.log('Usage: xo-backups clean-vms [--force] xo-vm-backups/*')
// eslint-disable-next-line no-process-exit
return process.exit(1)
}
// remove `clean-vms` arg which is the only available command ATM
args.splice(0, 1)
// only act (ie delete files) if `--force` is present
const force = args[0] === '--force'
if (force) {
args.splice(0, 1)
}
// -----------------------------------------------------------------------------
const assert = require('assert')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')
const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
// -----------------------------------------------------------------------------
const asyncMap = curryRight((iterable, fn) =>
Promise.all(
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
)
)
const filter = (...args) => thisArg => thisArg.filter(...args)
// TODO: better check?
// our heuristic is not good enough, there have been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// FIXME: the heuristic does not work if the XVA is compressed, we need to
// implement a specific test for it
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async path => {
try {
const fd = await fs.open(path, 'r')
try {
const { size } = await fs.fstat(fd)
if (size <= 1024 || size % 512 !== 0) {
return false
}
const buf = Buffer.allocUnsafe(1024)
assert.strictEqual(
await fs.read(fd, buf, 0, buf.length, size - buf.length),
buf.length
)
return buf.every(_ => _ === 0)
} finally {
fs.close(fd).catch(noop)
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidTar', path, error)
return true
}
}
const noop = Function.prototype
const readDir = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
entries[i] = `${path}/${entry}`
})
return entries
},
usage: '[--force] xo-vm-backups/*',
},
})(process.argv.slice(2), 'xo-backups').catch(error => {
console.error('main', error)
process.exitCode = 1
})
error => {
// a missing dir is by definition empty
if (error != null && error.code === 'ENOENT') {
return []
}
throw error
}
)
// -----------------------------------------------------------------------------
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
async function mergeVhdChain(chain) {
assert(chain.length >= 2)
const child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
console.warn('Unused parents of VHD', child)
chain
.slice(1)
.reverse()
.forEach(parent => {
console.warn(' ', parent)
})
force && console.warn(' merging…')
console.warn('')
if (force) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
// handler,
// parent,
// handler,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children)
// )
}
await Promise.all([
force && fs.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
console.warn('Unused VHD', child)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(child)
}),
])
}
const listVhds = pipe([
vmDir => vmDir + '/vdis',
readDir,
asyncMap(readDir),
flatten,
asyncMap(readDir),
flatten,
filter(_ => _.endsWith('.vhd')),
])
async function handleVm(vmDir) {
const vhds = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
// remove broken VHDs
await asyncMap(await listVhds(vmDir), async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error(
'this script does not support multiple VHD children'
)
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
console.warn('Error while checking VHD', path)
console.warn(' ', error)
if (error != null && error.code === 'ERR_ASSERTION') {
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(path))
}
}
})
// remove VHDs with missing ancestors
{
const deletions = []
// deletes the VHD if any of its ancestors is missing
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhd]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhd)
console.warn('Error while checking VHD', vhd)
console.warn(' missing parent', parent)
force && console.warn(' deleting…')
console.warn('')
force && deletions.push(handler.unlink(vhd))
}
}
// > A property that is deleted before it has been visited will not be
// > visited later.
// >
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
for (const child in vhdParents) {
deleteIfOrphan(child)
}
await Promise.all(deletions)
}
const [jsons, xvas] = await readDir(vmDir).then(entries => [
entries.filter(_ => _.endsWith('.json')),
new Set(entries.filter(_ => _.endsWith('.xva'))),
])
await asyncMap(xvas, async path => {
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await isValidTar(path))) {
console.warn('Potential broken XVA', path)
console.warn('')
}
})
const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await fs.readFile(json))
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve(vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
console.warn('Error while checking backup', json)
console.warn(' missing file', linkedXva)
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
})()
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
} else {
console.warn('Error while checking backup', json)
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
console.warn(
' %i/%i missing VHDs',
missingVhds.length,
linkedVhds.length
)
missingVhds.forEach(vhd => {
console.warn(' ', vhd)
})
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
}
})
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
const toCheck = new Set(unusedVhds)
const getUsedChildChainOrDelete = vhd => {
if (vhd in vhdChainsToMerge) {
const chain = vhdChainsToMerge[vhd]
delete vhdChainsToMerge[vhd]
return chain
}
if (!unusedVhds.has(vhd)) {
return [vhd]
}
// no longer needs to be checked
toCheck.delete(vhd)
const child = vhdChildren[vhd]
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
return chain
}
}
console.warn('Unused VHD', vhd)
force && console.warn(' deleting…')
console.warn('')
force && unusedVhdsDeletion.push(handler.unlink(vhd))
}
toCheck.forEach(vhd => {
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
})
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain))
}
})
}
await Promise.all([
unusedVhdsDeletion,
asyncMap(unusedXvas, path => {
console.warn('Unused XVA', path)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(path)
}),
])
}
// -----------------------------------------------------------------------------
asyncMap(args, async vmDir => {
vmDir = resolve(vmDir)
// TODO: implement this in `xo-server`, not easy because not compatible with
// `@xen-orchestra/fs`.
const release = await lockfile.lock(vmDir)
try {
await handleVm(vmDir)
} catch (error) {
console.error('handleVm', vmDir, error)
} finally {
await release()
}
}).catch(error => console.error('main', error))

View File

@@ -4,15 +4,14 @@
},
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/fs": "^0.10.2",
"getopts": "^2.2.5",
"@xen-orchestra/fs": "^0.10.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.14.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.7.2"
"vhd-lib": "^0.7.0"
},
"engines": {
"node": ">=7.10.1"
"node": ">=8.16.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",

View File

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.27.3"
"xen-api": "^0.27.2"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.6",
"version": "1.0.4",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [

View File

@@ -5,21 +5,14 @@ import parse from './parse'
const MAX_DELAY = 2 ** 31 - 1
function nextDelay(schedule) {
const now = schedule._createDate()
return next(schedule._schedule, now) - now
}
class Job {
constructor(schedule, fn) {
let scheduledDate
const wrapper = () => {
const now = Date.now()
if (scheduledDate > now) {
// we're early, delay
//
// no need to check _isEnabled, we're just delaying the existing timeout
//
// see https://github.com/vatesfr/xen-orchestra/issues/4625
this._timeout = setTimeout(wrapper, scheduledDate - now)
return
}
this._isRunning = true
let result
@@ -39,9 +32,7 @@ class Job {
this._isRunning = false
if (this._isEnabled) {
const now = schedule._createDate()
scheduledDate = +next(schedule._schedule, now)
const delay = scheduledDate - now
const delay = nextDelay(schedule)
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)

View File

@@ -2,24 +2,12 @@
import { createSchedule } from './'
const wrap = value => () => value
describe('issues', () => {
let originalDateNow
beforeAll(() => {
originalDateNow = Date.now
})
afterAll(() => {
Date.now = originalDateNow
originalDateNow = undefined
})
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise
const schedule = createSchedule('* * * * *')
const job = schedule.createJob(() => {
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
@@ -30,7 +18,6 @@ describe('issues', () => {
})
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(1)
@@ -48,8 +35,7 @@ describe('issues', () => {
let nCalls = 0
let resolve, promise
const schedule = createSchedule('* * * * *')
const job = schedule.createJob(() => {
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
@@ -60,7 +46,6 @@ describe('issues', () => {
})
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(1)
@@ -71,7 +56,6 @@ describe('issues', () => {
resolve()
await promise
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(2)
})

View File

@@ -1,13 +1,13 @@
# @xen-orchestra/defined [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/defined):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save @xen-orchestra/defined
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -62,10 +62,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.10.2",
"version": "0.10.1",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -18,16 +18,16 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@marsaud/smb2": "^0.14.0",
"@sindresorhus/df": "^3.1.1",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^3.2.0",
"execa": "^1.0.0",
"fs-extra": "^8.0.1",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0",

View File

@@ -389,7 +389,7 @@ export default class RemoteHandlerAbstract {
async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(randomBytes, SIZE)
const data = await fromCallback(cb => randomBytes(SIZE, cb))
let step = 'write'
try {
const writeStart = process.hrtime()

View File

@@ -86,7 +86,7 @@ handlers.forEach(url => {
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
@@ -106,7 +106,7 @@ handlers.forEach(url => {
describe('#createWriteStream()', () => {
testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
const stream = await handler.createWriteStream(file, { flags })
await fromCallback(pipeline, createTestDataStream(), stream)
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
await expect(await handler.readFile('file')).toEqual(TEST_DATA)
})

View File

@@ -47,19 +47,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {
})
}
async _getInfo() {
// df.file() resolves with an object with the following properties:
// filesystem, type, size, used, available, capacity and mountpoint.
// size, used, available and capacity may be `NaN` so we remove any `NaN`
// value from the object.
const info = await df.file(this._getFilePath('/'))
Object.keys(info).forEach(key => {
if (Number.isNaN(info[key])) {
delete info[key]
}
})
return info
_getInfo() {
return df.file(this._getFilePath('/'))
}
async _getSize(file) {

View File

@@ -15,7 +15,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/log):
Everywhere something should be logged:
```js
import { createLogger } from '@xen-orchestra/log'
import createLogger from '@xen-orchestra/log'
const log = createLogger('my-module')
@@ -42,7 +42,6 @@ log.error('could not join server', {
Then, at application level, configure the logs are handled:
```js
import { createLogger } from '@xen-orchestra/log'
import { configure, catchGlobalErrors } from '@xen-orchestra/log/configure'
import transportConsole from '@xen-orchestra/log/transports/console'
import transportEmail from '@xen-orchestra/log/transports/email'
@@ -78,8 +77,8 @@ configure([
])
// send all global errors (uncaught exceptions, warnings, unhandled rejections)
// to this logger
catchGlobalErrors(createLogger('app'))
// to this transport
catchGlobalErrors(transport)
```
### Transports

View File

@@ -48,7 +48,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,13 +1,13 @@
# @xen-orchestra/mixin [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/mixin):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save @xen-orchestra/mixin
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -4,107 +4,14 @@
### Enhancements
- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
- [SAML] Setting to disable requested authentication context (helps with _Active Directory_) (PR [#4675](https://github.com/vatesfr/xen-orchestra/pull/4675))
- The default sign-in page can be configured via `authentication.defaultSignInPage` (PR [#4678](https://github.com/vatesfr/xen-orchestra/pull/4678))
- [SR] Allow import of VHD and VMDK disks [#4137](https://github.com/vatesfr/xen-orchestra/issues/4137) (PR [#4138](https://github.com/vatesfr/xen-orchestra/pull/4138))
- [Host] Advanced Live Telemetry (PR [#4680](https://github.com/vatesfr/xen-orchestra/pull/4680))
### Bug fixes
- [Metadata backup] Add 10 minutes timeout to avoid stuck jobs [#4657](https://github.com/vatesfr/xen-orchestra/issues/4657) (PR [#4666](https://github.com/vatesfr/xen-orchestra/pull/4666))
- [Metadata backups] Fix out-of-date listing for 1 minute due to cache (PR [#4672](https://github.com/vatesfr/xen-orchestra/pull/4672))
- [Delta backup] Limit the number of merged deltas per run to avoid interrupted jobs (PR [#4674](https://github.com/vatesfr/xen-orchestra/pull/4674))
### Released packages
- vhd-lib v0.7.2
- xo-vmdk-to-vhd v0.1.8
- xo-server-auth-ldap v0.6.6
- xo-server-auth-saml v0.7.0
- xo-server-backup-reports v0.16.4
- @xen-orchestra/fs v0.10.2
- xo-server v5.53.0
- xo-web v5.53.1
## **5.40.2** (2019-11-22)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [Logs] Ability to report a bug with attached log (PR [#4201](https://github.com/vatesfr/xen-orchestra/pull/4201))
- [Backup] Reduce _VDI chain protection error_ occurrence by being more tolerant (configurable via `xo-server`'s `xapiOptions.maxUncoalescedVdis` setting) [#4124](https://github.com/vatesfr/xen-orchestra/issues/4124) (PR [#4651](https://github.com/vatesfr/xen-orchestra/pull/4651))
- [Plugin] [Web hooks](https://xen-orchestra.com/docs/web-hooks.html) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
- [Tables] Always put the tables' search in the URL [#4542](https://github.com/vatesfr/xen-orchestra/issues/4542) (PR [#4637](https://github.com/vatesfr/xen-orchestra/pull/4637))
### Bug fixes
- [SDN controller] Prevent private network creation on bond slave PIF (Fixes https://github.com/xcp-ng/xcp/issues/300) (PR [4633](https://github.com/vatesfr/xen-orchestra/pull/4633))
- [Metadata backup] Fix failed backup reported as successful [#4596](https://github.com/vatesfr/xen-orchestra/issues/4596) (PR [#4598](https://github.com/vatesfr/xen-orchestra/pull/4598))
- [Backup NG] Fix "task cancelled" error when the backup job timeout exceeds 596 hours [#4662](https://github.com/vatesfr/xen-orchestra/issues/4662) (PR [#4663](https://github.com/vatesfr/xen-orchestra/pull/4663))
- Fix `promise rejected with non-error` warnings in logs (PR [#4659](https://github.com/vatesfr/xen-orchestra/pull/4659))
### Released packages
- xo-server-web-hooks v0.1.0
- xen-api v0.27.3
- xo-server-backup-reports v0.16.3
- vhd-lib v0.7.1
- xo-server v5.52.1
- xo-web v5.52.0
## **5.40.1** (2019-10-29)
### Bug fixes
- [XOSAN] Fix "Install Cloud plugin" warning (PR [#4631](https://github.com/vatesfr/xen-orchestra/pull/4631))
### Released packages
- xo-web v5.51.1
## **5.40.0** (2019-10-29)
### Breaking changes
- `xo-server` requires Node 8.
### Highlights
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
### Enhancements
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
### Bug fixes
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
### Released packages
- @xen-orchestra/cron v1.0.6
- xo-server-transport-icinga2 v0.1.0
- xo-server-sdn-controller v0.3.1
- xo-server v5.51.1
- xo-web v5.51.0
### Dropped packages
- xo-server-cloud: this package was useless for OpenSource installations because it required a complete XOA environment
## **5.39.1** (2019-10-11)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
@@ -175,6 +82,8 @@
## **5.38.0** (2019-08-29)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Enhancements
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))

View File

@@ -3,16 +3,21 @@
> Keep in mind the changelog is addressed to **users** and should be
> understandable by them.
### Breaking changes
- `xo-server` requires Node 8.
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Host] Fix Enable Live Telemetry button state (PR [#4686](https://github.com/vatesfr/xen-orchestra/pull/4686))
- [Host] Fix Advanced Live Telemetry URL (PR [#4687](https://github.com/vatesfr/xen-orchestra/pull/4687))
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Patches] Better error handling when fetching missing patches (PR [#4519](https://github.com/vatesfr/xen-orchestra/pull/4519))
### Released packages
@@ -21,5 +26,5 @@
>
> Rule of thumb: add packages on top.
- xo-server v5.54.0
- xo-web v5.54.0
- xo-server v5.51.0
- xo-web v5.51.0

View File

@@ -51,7 +51,6 @@
* [Health](health.md)
* [Job manager](scheduler.md)
* [Alerts](alerts.md)
* [Web hooks](web-hooks.md)
* [Load balancing](load_balancing.md)
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)

View File

@@ -22,7 +22,7 @@ group = 'nogroup'
By default, XO-server listens on all addresses (0.0.0.0) and runs on port 80. If you need to, you can change this in the `# Basic HTTP` section:
```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 80
```
@@ -31,7 +31,7 @@ port = 80
XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:
```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 443
certificate = './certificate.pem'
key = './key.pem'
@@ -43,10 +43,10 @@ key = './key.pem'
If you want to redirect everything to HTTPS, you can modify the configuration like this:
```toml
```
# If set to true, all HTTP traffic will be redirected to the first HTTPs configuration.
redirectToHttps = true
redirectToHttps: true
```
This should be written just before the `mount` option, inside the `http:` block.
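For example, the relevant part of the file might end up looking like this (a sketch; the `[http.mounts]` entry is taken from the sample configuration and may differ on your system):
```toml
[http]
# If set to true, all HTTP traffic will be redirected to the first HTTPs configuration.
redirectToHttps = true

[http.mounts]
'/' = '../xo-web/dist/'
```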

View File

@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
```
$ node -v
v8.16.2
v8.12.0
```
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.
@@ -65,13 +65,17 @@ Now you have to create a config file for `xo-server`:
```
$ cd packages/xo-server
$ mkdir -p ~/.config/xo-server
$ cp sample.config.toml ~/.config/xo-server/config.toml
$ cp sample.config.toml .xo-server.toml
```
> Note: If you're installing `xo-server` as a global service, you may want to copy the file to `/etc/xo-server/config.toml` instead.
Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory):
In this config file, you can change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
```toml
[http.mounts]
'/' = '../xo-web/dist/'
```
In this config file, you can also change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
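For example, a non-root setup might use the following (a sketch reusing the `port` key from the sample config):
```toml
port = 8080
```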
You can try to start xo-server to see if it works. You should have something like this:
@@ -182,7 +186,7 @@ service redis start
## SUDO
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server` configuration file and setting `useSudo = true`. It's near the end of the file:
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server/.xo-server.toml` and setting `useSudo = true`. It's near the end of the file:
```
useSudo = true

View File

@@ -1,72 +0,0 @@
# Web hooks
⚠ This feature is experimental!
## Configuration
The plugin "web-hooks" needs to be installed and loaded for this feature to work.
You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called.
* Go to Settings > Plugins > Web hooks
* Add new hooks
* For each hook, configure:
* Method: the XO API method that will trigger the HTTP request when called
* Type:
* pre: the request will be sent when the method is called
* post: the request will be sent after the method action is completed
* pre/post: both
* URL: the full URL which the requests will be sent to
* Save the plugin configuration
From now on, a request will be sent to the corresponding URLs when a configured method is called by an XO client.
## Request content
```
POST / HTTP/1.1
Content-Type: application/json
```
The request's body is a JSON string representing an object with the following properties:
- `type`: `"pre"` or `"post"`
- `callId`: unique ID for this call to help match a pre-call and a post-call
- `userId`: unique internal ID of the user who performed the call
- `userName`: login/e-mail address of the user who performed the call
- `method`: name of the method that was called (e.g. `"vm.start"`)
- `params`: call parameters (object)
- `timestamp`: epoch timestamp of the beginning ("pre") or end ("post") of the call in ms
- `duration`: duration of the call in ms ("post" hooks only)
- `result`: call result on success ("post" hooks only)
- `error`: call result on error ("post" hooks only)
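For illustration, a `post` hook on `vm.start` might send a body like the following (all values are hypothetical):
```
{
  "type": "post",
  "callId": "0123456789abcdef",
  "userId": "b1e19461-a996-4b2f-a202-d7a8c0457b5b",
  "userName": "admin@example.net",
  "method": "vm.start",
  "params": { "id": "67aac198-0174-11ea-8d71-362b9e155667" },
  "timestamp": 1571061600000,
  "duration": 1549,
  "result": true
}
```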
## Request handling
*Quick Node.js example of how you may want to handle the requests*
```js
const http = require('http')
const { exec } = require('child_process')
http
.createServer((req, res) => {
let body = ''
req.on('data', chunk => {
body += chunk
})
req.on('end', () => handleHook(body))
res.end()
})
.listen(3000)
const handleHook = data => {
const { method, params, type, result, error, timestamp } = JSON.parse(data)
// Log it
console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params}${result || error}`)
// Run scripts
exec(`./hook-scripts/${method}-${type}.sh`)
}
```

View File

@@ -22,9 +22,9 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R
### The quickest way
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go to https://xen-orchestra.com/#!/xoa and follow the instructions.
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.
> **Note:** no data will be sent to our servers, the deployment only runs between your browser and your host!
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
![](./assets/deploy_form.png)
@@ -41,12 +41,12 @@ bash -c "$(curl -s http://xoa.io/deploy)"
Follow the instructions:
* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS server should be provided.
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.
### Via a manual XVA download
### Via download the XVA
You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
@@ -64,35 +64,6 @@ Once you have started the VM, you can access the web UI by putting the IP you co
**The first thing** you need to do with your XOA is register. [Read the documentation on the page dedicated to the updater/register interface](updater.md).
## Technical Support
In your appliance, you can access the support section in the XOA menu. In this section you can:
* launch an `xoa check` command
![](https://xen-orchestra.com/blog/content/images/2019/10/xoacheck.png)
* Open a secure support tunnel so our team can remotely investigate
![](https://user-images.githubusercontent.com/10992860/67384755-10f47f80-f592-11e9-974d-bbdefd0bf353.gif)
<a id="ssh-pro-support"></a>
If your web UI is not working, you can also open the secure support tunnel from the CLI. To open a private tunnel (we are the only ones with the private key), you can use the command `xoa support tunnel` like below:
```
$ xoa support tunnel
The support tunnel has been created.
Do not stop this command before the intervention is over!
Give this id to the support: 40713
```
Give us this number, and we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
> The tunnel utilizes the user `xoa-support`. If you want to deactivate this bundled user, you can run `chage -E 0 xoa-support`. To re-activate this account, you must run `chage -E 1 xoa-support`.
### First console connection
If you connect via SSH or console, the default credentials are:
@@ -185,6 +156,21 @@ You can access the VM console through XenCenter or using VNC through a SSH tunne
If you want to go back to DHCP, just run `xoa network dhcp`
### SSH Pro Support
By default, if you need support, there is a dedicated user named `xoa-support`. We are the only one with the private key. If you want our assistance on your XOA, you can open a private tunnel with the command `xoa support tunnel` like below:
```
$ xoa support tunnel
The support tunnel has been created.
Do not stop this command before the intervention is over!
Give this id to the support: 40713
```
Give us this number, we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
> If you want to deactivate this bundled user, you can type `chage -E 0 xoa-support`. To re-activate this account, you must use the `chage -E 1 xoa-support`.
### Firewall

View File

@@ -17,7 +17,7 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.112.0",
"flow-bin": "^0.109.0",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",
@@ -60,7 +60,6 @@
"posttest": "scripts/run-script test",
"prepare": "scripts/run-script prepare",
"pretest": "eslint --ignore-path .gitignore .",
"prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
"test-integration": "jest \".integ\\.spec\\.js$\"",
"travis-tests": "scripts/travis-tests"

View File

@@ -24,15 +24,15 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.2",
"@xen-orchestra/fs": "^0.10.1",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.7.2"
"vhd-lib": "^0.7.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -40,7 +40,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"execa": "^3.2.0",
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.14.0",
"rimraf": "^3.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-lib",
"version": "0.7.2",
"version": "0.7.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -18,17 +18,15 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"lodash": "^4.17.4",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -37,10 +35,10 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.2",
"@xen-orchestra/fs": "^0.10.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"execa": "^3.2.0",
"execa": "^2.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",

View File

@@ -17,7 +17,10 @@ export default async function readChunk(stream, n) {
resolve(Buffer.concat(chunks, i))
}
const onEnd = resolve2
function onEnd() {
resolve2()
clean()
}
function onError(error) {
reject(error)
@@ -31,11 +34,8 @@ export default async function readChunk(stream, n) {
}
i += chunk.length
chunks.push(chunk)
if (i === n) {
if (i >= n) {
resolve2()
} else if (i > n) {
throw new RangeError(`read (${i}) more than expected (${n})`)
}
}

View File

@@ -29,13 +29,13 @@ export default asyncIteratorToStream(async function*(size, blockParser) {
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.logicalAddressBytes - position
const paddingLength = next.offsetBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield* filePadding(paddingLength)
yield next.data
position = next.logicalAddressBytes + next.data.length
position = next.offsetBytes + next.data.length
}
yield* filePadding(actualSize - position)
yield footer

View File

@@ -1,6 +1,5 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forEachRight } from 'lodash'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
@@ -18,65 +17,38 @@ import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
* Looks once backwards to collect the last fragment of each VHD block (they could be interleaved),
* then allocates the blocks in a forwards pass.
* @returns currentVhdPositionSector the first free sector after the data
*/
function createBAT(
firstBlockPosition,
fragmentLogicAddressList,
blockAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
const lastFragmentPerBlock = new Map()
forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(
fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
)
if (!lastFragmentPerBlock.has(vhdTableIndex)) {
lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
})
const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
// lastFragmentPerBlock is from last to first, so we go the other way around
forEachRight(
lastFragmentPerBlockArray,
([vhdTableIndex, _fragmentVirtualAddress]) => {
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
}
)
return [currentVhdPositionSector, lastFragmentPerBlock]
return currentVhdPositionSector
}
/**
* Receives an iterator of constant sized fragments, and a list of their address in virtual space, and returns
* a stream representing the VHD file of this disk.
* The fragment size should be an integer divider of the VHD block size.
* "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
* @param diskSize
* @param fragmentSize
* @param fragmentLogicalAddressList
* @param fragmentIterator
* @returns {Promise<Function>}
*/
export default async function createReadableStream(
diskSize,
fragmentSize,
fragmentLogicalAddressList,
fragmentIterator
incomingBlockSize,
blockAddressList,
blockIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${fragmentSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
@@ -108,72 +80,60 @@ export default async function createReadableStream(
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
const [endOfData, lastFragmentPerBlock] = createBAT(
const endOfData = createBAT(
firstBlockPosition,
fragmentLogicalAddressList,
blockAddressList,
ratio,
bat,
bitmapSize
)
const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
let position = 0
function* yieldAndTrack(buffer, expectedPosition, reason) {
function* yieldAndTrack(buffer, expectedPosition) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition, reason)
assert.strictEqual(position, expectedPosition)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
function insertFragmentInBlock(fragment, blockWithBitmap) {
const fragmentOffsetInBlock =
(fragment.logicalAddressBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(blockWithBitmap, fragmentOffsetInBlock + bitPos)
}
fragment.data.copy(
blockWithBitmap,
bitmapSize + (fragment.logicalAddressBytes % VHD_BLOCK_SIZE_BYTES)
)
}
async function* generateBlocks(fragmentIterator, bitmapSize) {
let currentFragmentIndex = -1
// store blocks waiting for some of their fragments.
const batIndexToBlockMap = new Map()
for await (const fragment of fragmentIterator) {
currentFragmentIndex++
const batIndex = Math.floor(
fragment.logicalAddressBytes / VHD_BLOCK_SIZE_BYTES
)
let currentBlockWithBitmap = batIndexToBlockMap.get(batIndex)
if (currentBlockWithBitmap === undefined) {
async function* generateFileContent(blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield* yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
batIndexToBlockMap.set(batIndex, currentBlockWithBitmap)
currentVhdBlockIndex = batIndex
}
insertFragmentInBlock(fragment, currentBlockWithBitmap)
const batEntry = bat.readUInt32BE(batIndex * 4)
assert.notStrictEqual(batEntry, BLOCK_UNUSED)
const batPosition = batEntry * SECTOR_SIZE
if (lastFragmentPerBlock.get(batIndex) === fragment.logicalAddressBytes) {
batIndexToBlockMap.delete(batIndex)
yield* yieldAndTrack(
currentBlockWithBitmap,
batPosition,
`VHD block start index: ${currentFragmentIndex}`
)
const blockOffset =
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + (next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
)
}
yield* yieldAndTrack(currentBlockWithBitmap)
}
async function* iterator() {
yield* yieldAndTrack(footer, 0)
yield* yieldAndTrack(header, FOOTER_SIZE)
yield* yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield* generateBlocks(fragmentIterator, bitmapSize)
yield* generateFileContent(blockIterator, bitmapSize, ratio)
yield* yieldAndTrack(footer)
}

View File

@@ -1,5 +1,4 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
@@ -14,17 +13,12 @@ import {
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const { warn } = createLogger('vhd-lib:createSyntheticStream')
export default async function createSyntheticStream(handler, paths) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
console.warn('createReadStream, closeFd', i, error)
})
}
}

View File

@@ -6,8 +6,11 @@ export { default as chainVhd } from './chain'
export { default as checkVhdChain } from './checkChain'
export { default as createContentStream } from './createContentStream'
export { default as createReadableRawStream } from './createReadableRawStream'
export { default as createReadableSparseStream } from './createReadableSparseStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export {
default as createVhdStreamWithLength,
} from './createVhdStreamWithLength'

View File

@@ -1,10 +0,0 @@
import readChunk from './_readChunk'
import { FOOTER_SIZE } from './_constants'
import { fuFooter } from './_structs'
export default async function peekFooterFromStream(stream) {
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
stream.unshift(footerBuffer)
return footer
}
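Because the footer bytes are pushed back with `stream.unshift`, a caller can inspect the metadata and still consume the full stream afterwards. A usage sketch (the footer field names are assumptions about `fuFooter`'s layout):

```js
import { createReadStream } from 'fs'
import { peekFooterFromVhdStream } from 'vhd-lib'

async function printVhdInfo(path) {
  const stream = createReadStream(path)
  const footer = await peekFooterFromVhdStream(stream)
  // the 512 unshifted bytes will be re-read by whatever consumes the stream next
  console.log(footer.diskType, footer.currentSize)
  stream.destroy()
}
```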

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
@@ -16,7 +15,10 @@ import {
SECTOR_SIZE,
} from './_constants'
const { debug } = createLogger('vhd-lib:Vhd')
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//
@@ -38,11 +40,9 @@ const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
assert.strictEqual(
actual,
expected,
`invalid ${name} checksum ${actual}, expected ${expected}`
)
if (actual !== expected) {
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
}
}
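Both variants enforce the same rule from the VHD specification: the checksum is the one's complement of the byte-wise sum of the structure with its checksum field zeroed. A standalone sketch of that rule, where `checksumOffset` marks the 4-byte checksum field:

```js
function vhdChecksum(buf, checksumOffset) {
  let sum = 0
  for (let i = 0; i < buf.length; ++i) {
    // skip the 4 checksum bytes themselves
    if (i < checksumOffset || i >= checksumOffset + 4) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0 // one's complement, as an unsigned 32-bit value
}
```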
// unused block as buffer containing a uint32BE
@@ -102,7 +102,7 @@ export default class Vhd {
}
// Returns the first address after metadata. (In bytes)
_getEndOfHeaders() {
getEndOfHeaders() {
const { header } = this
let end = FOOTER_SIZE + HEADER_SIZE
@@ -127,8 +127,8 @@ export default class Vhd {
}
// Returns the first sector after data.
_getEndOfData() {
let end = Math.ceil(this._getEndOfHeaders() / SECTOR_SIZE)
getEndOfData() {
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
@@ -309,8 +309,8 @@ export default class Vhd {
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async _createBlock(blockId) {
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
async createBlock(blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
@@ -325,7 +325,7 @@ export default class Vhd {
}
// Write a bitmap at a block address.
async _writeBlockBitmap(blockAddr, bitmap) {
async writeBlockBitmap(blockAddr, bitmap) {
const { bitmapSize } = this
if (bitmap.length !== bitmapSize) {
@@ -342,20 +342,20 @@ export default class Vhd {
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async _writeEntireBlock(block) {
async writeEntireBlock(block) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this._createBlock(block.id)
blockAddr = await this.createBlock(block.id)
}
await this._write(block.buffer, sectorsToBytes(blockAddr))
}
async _writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
async writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this._createBlock(block.id)
blockAddr = await this.createBlock(block.id)
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
} else if (parentBitmap === undefined) {
parentBitmap = (await this._readBlock(block.id, true)).bitmap
@@ -364,14 +364,14 @@ export default class Vhd {
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(
`_writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
)
for (let i = beginSectorId; i < endSectorId; ++i) {
mapSetBit(parentBitmap, i)
}
await this._writeBlockBitmap(blockAddr, parentBitmap)
await this.writeBlockBitmap(blockAddr, parentBitmap)
await this._write(
block.data.slice(
sectorsToBytes(beginSectorId),
@@ -407,12 +407,12 @@ export default class Vhd {
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this._writeEntireBlock(block)
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
await this.writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
@@ -429,7 +429,7 @@ export default class Vhd {
const rawFooter = fuFooter.pack(footer)
const eof = await this._handler.getSize(this._path)
// sometimes the file is longer than anticipated, we still need to put the footer at the end
const offset = Math.max(this._getEndOfData(), eof - rawFooter.length)
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
footer.checksum = checksumStruct(rawFooter, fuFooter)
debug(
@@ -500,7 +500,7 @@ export default class Vhd {
endInBuffer
)
}
await this._writeBlockSectors(
await this.writeBlockSectors(
{ id: currentBlock, data: inputBuffer },
offsetInBlockSectors,
endInBlockSectors
@@ -509,7 +509,7 @@ export default class Vhd {
await this.writeFooter()
}
async _ensureSpaceForParentLocators(neededSectors) {
async ensureSpaceForParentLocators(neededSectors) {
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
@@ -528,7 +528,7 @@ export default class Vhd {
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this._ensureSpaceForParentLocators(dataSpaceSectors)
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace =
dataSpaceSectors * SECTOR_SIZE

View File

@@ -31,11 +31,11 @@ test('createFooter() does not crash', () => {
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
logicalAddressBytes: 100,
offsetBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 700,
offsetBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -62,11 +62,11 @@ test('ReadableRawVHDStream does not crash', async () => {
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
logicalAddressBytes: 700,
offsetBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 100,
offsetBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -97,11 +97,11 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
logicalAddressBytes: blockSize * 3,
offsetBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: blockSize * 100,
offsetBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
@@ -109,7 +109,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const stream = await createReadableSparseStream(
fileSize,
blockSize,
blocks.map(b => b.logicalAddressBytes),
blocks.map(b => b.offsetBytes),
blocks
)
expect(stream.length).toEqual(4197888)
@@ -128,7 +128,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.logicalAddressBytes)
b.data.copy(expected, b.offsetBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

View File

@@ -36,12 +36,12 @@
},
"dependencies": {
"archy": "^1.0.0",
"chalk": "^3.0.0",
"chalk": "^2.3.2",
"exec-promise": "^0.7.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.27.3"
"xen-api": "^0.27.2"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

View File

@@ -4,7 +4,6 @@ process.env.DEBUG = '*'
const defer = require('golike-defer').default
const { CancelToken } = require('promise-toolbox')
const { createVhdStreamWithLength } = require('vhd-lib')
const { createClient } = require('../')
@@ -33,13 +32,8 @@ defer(async ($defer, args) => {
const { cancel, token } = CancelToken.source()
process.on('SIGINT', cancel)
let input = createInputStream(args[2])
if (!raw && input.length === undefined) {
input = await createVhdStreamWithLength(input)
}
// https://xapi-project.github.io/xen-api/snapshots.html#uploading-a-disk-or-snapshot
await xapi.putResource(token, input, '/import_raw_vdi/', {
await xapi.putResource(token, createInputStream(args[2]), '/import_raw_vdi/', {
query: {
format: raw ? 'raw' : 'vhd',
vdi: await resolveRef(xapi, 'VDI', args[1])

View File

@@ -2,28 +2,6 @@
"requires": true,
"lockfileVersion": 1,
"dependencies": {
"@xen-orchestra/log": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/@xen-orchestra/log/-/log-0.2.0.tgz",
"integrity": "sha512-xNseJ/TIUdASm9uxr0zVvg8qDG+Xw6ycJy4dag+e1yl6pEr77GdPJD2R0JbE1BbZwup/Skh3TEh6L0GV+9NRdQ==",
"requires": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
}
},
"async-iterator-to-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/async-iterator-to-stream/-/async-iterator-to-stream-1.1.0.tgz",
"integrity": "sha512-ddF3u7ipixenFJsYCKqVR9tNdkIzd2j7JVg8QarqkfUl7UTR7nhJgc1Q+3ebP/5DNFhV9Co9F47FJjGpdc0PjQ==",
"requires": {
"readable-stream": "^3.0.5"
}
},
"core-js": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-3.4.1.tgz",
"integrity": "sha512-KX/dnuY/J8FtEwbnrzmAjUYgLqtk+cxM86hfG60LGiW3MmltIc2yAmDgBgEkfm0blZhUrdr1Zd84J2Y14mLxzg=="
},
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
@@ -46,41 +24,6 @@
"node-gyp-build": "^3.7.0"
}
},
"from2": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
"integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
"requires": {
"inherits": "^2.0.1",
"readable-stream": "^2.0.0"
},
"dependencies": {
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
}
}
},
"fs-extra": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
"integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
"requires": {
"graceful-fs": "^4.2.0",
"jsonfile": "^4.0.0",
"universalify": "^0.1.0"
}
},
"getopts": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
@@ -91,11 +34,6 @@
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
},
"graceful-fs": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
"integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
},
"human-format": {
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
@@ -111,24 +49,6 @@
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
"integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
"requires": {
"graceful-fs": "^4.1.6"
}
},
"limit-concurrency-decorator": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/limit-concurrency-decorator/-/limit-concurrency-decorator-0.4.0.tgz",
"integrity": "sha512-hXGTuCkYjosfHT1D7dcPKzPHSGwBtZfN0wummzDwxi5A3ZUNBB75qM8phKEjQGlQGAfYrMW/JqhbaljO3xOH0A=="
},
"lodash": {
"version": "4.17.15",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
},
"make-error": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
@@ -221,11 +141,6 @@
"safe-buffer": "~5.1.0"
}
},
"struct-fu": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/struct-fu/-/struct-fu-1.2.1.tgz",
"integrity": "sha512-QrtfoBRe+RixlBJl852/Gu7tLLTdx3kWs3MFzY1OHNrSsYYK7aIAnzqsncYRWrKGG/QSItDmOTlELMxehw4Gjw=="
},
"throttle": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
@@ -260,47 +175,11 @@
}
}
},
"universalify": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
"uuid": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz",
"integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ=="
},
"vhd-lib": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/vhd-lib/-/vhd-lib-0.7.1.tgz",
"integrity": "sha512-TODzo7KjtNzYF/NuJjE5bPeGyXZIUzAOVJvED1dcPXr8iSnS6/U5aNdtKahBVwukEzf0/x+Cu3GMYutV4/cxsQ==",
"requires": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
"dependencies": {
"promise-toolbox": {
"version": "0.14.0",
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.14.0.tgz",
"integrity": "sha512-VV5lXK4lXaPB9oBO50ope1qd0AKN8N3nK14jYvV9/qFmfZW2Px/bJjPZBniGjXcIJf6J5Y/coNgJtPHDyiUV/g==",
"requires": {
"make-error": "^1.3.2"
}
}
}
},
"xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",

View File

@@ -7,7 +7,6 @@
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^0.7.2"
"throttle": "^1.0.3"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.27.3",
"version": "0.27.2",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [

View File

@@ -25,6 +25,7 @@ import isReadOnlyCall from './_isReadOnlyCall'
import makeCallSetting from './_makeCallSetting'
import parseUrl from './_parseUrl'
import replaceSensitiveValues from './_replaceSensitiveValues'
import XapiError from './_XapiError'
// ===================================================================
@@ -625,7 +626,9 @@ export class Xapi extends EventEmitter {
kindOf(result)
)
return result
} catch (error) {
} catch (e) {
const error = e instanceof Error ? e : XapiError.wrap(e)
// do not log the session ID
//
// TODO: should log at the session level to avoid logging sensitive
@@ -740,9 +743,9 @@ export class Xapi extends EventEmitter {
// the event loop in that case
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (
await this._interruptOnDisconnect(this._call('system.listMethods'))
)
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
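The reformatted introspection above derives the record types from the XML-RPC method list. The same logic in isolation, with a plausible definition of `isGetAllRecordsMethod` (an assumption), given that `system.listMethods` returns strings like `'VM.get_all_records'`:

```js
const isGetAllRecordsMethod = method => method.endsWith('.get_all_records')

const listTypes = methods =>
  methods.filter(isGetAllRecordsMethod).map(m => m.slice(0, m.indexOf('.')))

listTypes(['VM.get_all_records', 'VM.start', 'pool.get_all_records'])
// → ['VM', 'pool']
```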

View File

@@ -1,8 +1,6 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'
import XapiError from '../_XapiError'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
@@ -32,7 +30,7 @@ export default ({ allowUnauthorized, url }) => {
return response.result
}
throw XapiError.wrap(response.error)
throw response.error
},
error => {
if (error.response !== undefined) {

View File

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import XapiError from '../_XapiError'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'
@@ -35,7 +33,7 @@ const parseResult = result => {
}
if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}
const value = result.Value

View File

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import XapiError from '../_XapiError'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
const logError = error => {
@@ -28,7 +26,7 @@ const parseResult = result => {
}
if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}
return result.Value
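These transport hunks are the other half of the `catch` block change in `Xapi#_call` above: transports now rethrow the raw XAPI error descriptor, and the wrapping happens exactly once in the caller. A condensed sketch, assuming `XapiError.wrap` accepts a raw `ErrorDescription` array:

```js
import XapiError from './_XapiError' // internal xen-api module

async function callWithTypedErrors(transport, method, args) {
  try {
    return await transport(method, args)
  } catch (e) {
    // e may be a plain descriptor such as ['SESSION_INVALID', 'OpaqueRef:...']
    throw e instanceof Error ? e : XapiError.wrap(e)
  }
}
```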

View File

@@ -26,12 +26,12 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "^7.0.0",
"bluebird": "^3.5.1",
"chalk": "^3.0.0",
"chalk": "^2.2.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.8.0",

View File

@@ -386,7 +386,7 @@ async function call(args) {
printProgress
)
return fromCallback(pump, response, progress, output)
return fromCallback(cb => pump(response, progress, output, cb))
}
if (key === '$sendTo') {
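The two `fromCallback` lines above are equivalent: promise-toolbox accepts either an explicit wrapper or the function plus its leading arguments, appending the node-style callback itself. A self-contained sketch with `pump`:

```js
import { createReadStream, createWriteStream } from 'fs'
import { fromCallback } from 'promise-toolbox'
import pump from 'pump'

async function copy(from, to) {
  const source = createReadStream(from)
  const dest = createWriteStream(to)

  // older form: wrap the callback explicitly
  // await fromCallback(cb => pump(source, dest, cb))

  // newer form: fromCallback appends the callback for you
  await fromCallback(pump, source, dest)
}
```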

View File

@@ -260,10 +260,7 @@ describe('Collection', function() {
forEach(
{
'add & update → add': [
[
['add', 'foo', 0],
['update', 'foo', 1],
],
[['add', 'foo', 0], ['update', 'foo', 1]],
{
add: {
foo: 1,
@@ -271,19 +268,10 @@ describe('Collection', function() {
},
],
'add & remove → ∅': [
[
['add', 'foo', 0],
['remove', 'foo'],
],
{},
],
'add & remove → ∅': [[['add', 'foo', 0], ['remove', 'foo']], {}],
'update & update → update': [
[
['update', 'bar', 1],
['update', 'bar', 2],
],
[['update', 'bar', 1], ['update', 'bar', 2]],
{
update: {
bar: 2,
@@ -292,10 +280,7 @@ describe('Collection', function() {
],
'update & remove → remove': [
[
['update', 'bar', 1],
['remove', 'bar'],
],
[['update', 'bar', 1], ['remove', 'bar']],
{
remove: {
bar: undefined,
@@ -304,10 +289,7 @@ describe('Collection', function() {
],
'remove & add → update': [
[
['remove', 'bar'],
['add', 'bar', 0],
],
[['remove', 'bar'], ['add', 'bar', 0]],
{
update: {
bar: 0,
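Only the formatting of these fixtures changed, but the pairs themselves document how xo-collection collapses two buffered events on the same key into one. The table as a function (a sketch of the assumed semantics; `null` means no event is emitted, and the remove-then-remove case is an assumption not covered by the fixtures):

```js
const mergeEvents = (first, second) => {
  if (first === 'add') return second === 'remove' ? null : 'add'
  if (first === 'update') return second === 'remove' ? 'remove' : 'update'
  // first === 'remove'
  return second === 'add' ? 'update' : 'remove'
}

mergeEvents('add', 'update') // → 'add'    (add & update → add)
mergeEvents('add', 'remove') // → null     (add & remove → ∅)
mergeEvents('remove', 'add') // → 'update' (remove & add → update)
```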

View File

@@ -1,4 +1,4 @@
import iteratee from 'lodash/iteratee'
import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import isEmpty from './is-empty'
@@ -17,9 +17,9 @@ export default class Index {
this._keysToHash = Object.create(null)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
}
// This method is used to compute the hash under which an item must

View File

@@ -1,4 +1,4 @@
import iteratee from 'lodash/iteratee'
import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import NotImplemented from './not-implemented'
@@ -16,9 +16,9 @@ export default class UniqueIndex {
this._keysToHash = Object.create(null)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
}
// This method is used to compute the hash under which an item must

View File

@@ -1,5 +1,4 @@
import createCallback from 'lodash/iteratee'
import forEach from 'lodash/forEach'
import { bind, forEach, iteratee as createCallback } from 'lodash'
import Collection, {
ACTION_ADD,
@@ -20,9 +19,9 @@ export default class View extends Collection {
this._onAdd(this._collection.all)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
// Register listeners.
this._collection.on(ACTION_ADD, this._onAdd)

View File

@@ -172,3 +172,11 @@ export const patchPrecheckFailed = create(20, ({ errorType, patch }) => ({
},
message: `patch precheck failed: ${errorType}`,
}))
export const listMissingPatchesFailed = create(21, ({ host, reason }) => ({
data: {
host,
reason,
},
message: 'could not fetch missing patches',
}))
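This new factory (error code 21) is the heart of the two commits being compared: instead of letting a XAPI failure bubble up untyped, xo-server can raise a structured API error. A hypothetical call-site sketch; the `xapi.listMissingPatches` helper and the `host.id`/`error.message` wiring are assumptions:

```js
import { listMissingPatchesFailed } from 'xo-common/api-errors'

async function getMissingPatches(xapi, host) {
  try {
    return await xapi.listMissingPatches(host.id) // hypothetical helper
  } catch (error) {
    throw listMissingPatchesFailed({ host: host.id, reason: error.message })
  }
}
```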

View File

@@ -46,7 +46,7 @@
"@types/node": "^12.0.2",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^9.0.0",
"tslint-config-standard": "^8.0.1",
"typescript": "^3.1.6"
},
"scripts": {

View File

@@ -1,13 +1,13 @@
# xo-remote-parser [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/xo-remote-parser):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save xo-remote-parser
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
AGPL-3.0 © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.6.6",
"version": "0.6.5",
"license": "AGPL-3.0",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [

View File

@@ -1,7 +1,7 @@
/* eslint no-throw-literal: 0 */
import eventToPromise from 'event-to-promise'
import noop from 'lodash/noop'
import { bind, noop } from 'lodash'
import { createClient } from 'ldapjs'
import { escape } from 'ldapjs/lib/filters/escape'
import { promisify } from 'promise-toolbox'
@@ -9,11 +9,6 @@ import { readFile } from 'fs'
// ===================================================================
const DEFAULTS = {
checkCertificate: true,
filter: '(uid={{name}})',
}
const VAR_RE = /\{\{([^}]+)\}\}/g
const evalFilter = (filter, vars) =>
filter.replace(VAR_RE, (_, name) => {
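The hunk cuts `evalFilter` off at its replace callback; a plausible completion, assuming unknown variables are rejected and values are LDAP-escaped with the `escape` helper imported above:

```js
import { escape } from 'ldapjs/lib/filters/escape'

const VAR_RE = /\{\{([^}]+)\}\}/g
const evalFilter = (filter, vars) =>
  filter.replace(VAR_RE, (_, name) => {
    const value = vars[name]
    if (value === undefined) {
      throw new Error(`invalid filter variable: ${name}`)
    }
    return escape(value)
  })

evalFilter('(uid={{name}})', { name: 'jdoe' }) // → '(uid=jdoe)'
```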
@@ -48,7 +43,7 @@ If not specified, it will use a default set of well-known CAs.
description:
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
type: 'boolean',
defaults: DEFAULTS.checkCertificate,
default: true,
},
bind: {
description: 'Credentials to use before looking for the user record.',
@@ -81,11 +76,6 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
description: `
Filter used to find the user.
For LDAP if you want to filter for a special group you can try
something like:
- \`(&(uid={{name}})(memberOf=<group DN>))\`
For Microsoft Active Directory, you can try one of the following filters:
- \`(cn={{name}})\`
@@ -93,12 +83,13 @@ For Microsoft Active Directory, you can try one of the following filters:
- \`(sAMAccountName={{name}}@<domain>)\` (replace \`<domain>\` by your own domain)
- \`(userPrincipalName={{name}})\`
Or something like this if you also want to filter by group:
For LDAP if you want to filter for a special group you can try
something like:
- \`(&(sAMAccountName={{name}})(memberOf=<group DN>))\`
- \`(&(uid={{name}})(memberOf=<group DN>))\`
`.trim(),
type: 'string',
default: DEFAULTS.filter,
default: '(uid={{name}})',
},
},
required: ['uri', 'base'],
@@ -125,7 +116,7 @@ class AuthLdap {
constructor(xo) {
this._xo = xo
this._authenticate = this._authenticate.bind(this)
this._authenticate = bind(this._authenticate, this)
}
async configure(conf) {
@@ -136,11 +127,7 @@ class AuthLdap {
})
{
const {
bind,
checkCertificate = DEFAULTS.checkCertificate,
certificateAuthorities,
} = conf
const { bind, checkCertificate = true, certificateAuthorities } = conf
if (bind) {
clientOpts.bindDN = bind.dn
@@ -160,7 +147,7 @@ class AuthLdap {
const {
bind: credentials,
base: searchBase,
filter: searchFilter = DEFAULTS.filter,
filter: searchFilter = '(uid={{name}})',
} = conf
this._credentials = credentials

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { bind } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { readFile, writeFile } from 'fs'
@@ -16,7 +17,7 @@ const CACHE_FILE = './ldap.cache.conf'
execPromise(async args => {
const config = await promptSchema(
configurationSchema,
await fromCallback(readFile, CACHE_FILE, 'utf-8').then(
await fromCallback(cb => readFile(CACHE_FILE, 'utf-8', cb)).then(
JSON.parse,
() => ({})
)
@@ -43,6 +44,6 @@ execPromise(async args => {
}),
password: await password('Password'),
},
console.log.bind(console)
bind(console.log, console)
)
})

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.7.0",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [

View File

@@ -2,10 +2,6 @@ import { Strategy } from 'passport-saml'
// ===================================================================
const DEFAULTS = {
disableRequestedAuthnContext: false,
}
export const configurationSchema = {
description:
'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
@@ -34,11 +30,6 @@ You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddr
`,
type: 'string',
},
disableRequestedAuthnContext: {
title: "Don't request an authentication context",
description: 'This is known to help when using Active Directory',
default: DEFAULTS.disableRequestedAuthnContext,
},
},
required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
}
@@ -55,7 +46,6 @@ class AuthSamlXoPlugin {
configure({ usernameField, ...conf }) {
this._usernameField = usernameField
this._conf = {
...DEFAULTS,
...conf,
// must match the callback URL

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.4",
"version": "0.16.2",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -36,7 +36,6 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",

View File

@@ -2,7 +2,6 @@ import createLogger from '@xen-orchestra/log'
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, groupBy, startCase } from 'lodash'
import { get } from '@xen-orchestra/defined'
import pkg from '../package'
const logger = createLogger('xo:xo-server-backup-reports')
@@ -187,7 +186,7 @@ const MARKDOWN_BY_TYPE = {
}
const getMarkdown = (task, props) =>
MARKDOWN_BY_TYPE[task.data?.type]?.(task, props)
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
const toMarkdown = parts => {
const lines = []
@@ -318,7 +317,6 @@ class BackupReportsXoPlugin {
const taskMarkdown = await getMarkdown(task, {
formatDate,
jobName: log.jobName,
xo,
})
if (taskMarkdown === undefined) {
continue
@@ -356,7 +354,7 @@ class BackupReportsXoPlugin {
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
@@ -366,10 +364,9 @@ class BackupReportsXoPlugin {
})
}
async _ngVmHandler(log, { name: jobName, settings }, schedule, force) {
async _ngVmHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const mailReceivers = get(() => settings[''].reportRecipients)
const { reportWhen, mode } = log.data || {}
const formatDate = createDateFormatter(schedule?.timezone)
@@ -392,9 +389,8 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
mailReceivers,
markdown: toMarkdown(markdown),
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
@@ -646,12 +642,11 @@ class BackupReportsXoPlugin {
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
mailReceivers,
markdown: toMarkdown(markdown),
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
@@ -661,18 +656,12 @@ class BackupReportsXoPlugin {
})
}
_sendReport({
mailReceivers = this._mailsReceivers,
markdown,
nagiosMarkdown,
subject,
success,
}) {
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: mailReceivers,
to: this._mailsReceivers,
subject,
markdown,
}),
@@ -687,14 +676,9 @@ class BackupReportsXoPlugin {
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: success ? 0 : 2,
status: nagiosStatus,
message: nagiosMarkdown,
}),
xo.sendIcinga2Status !== undefined &&
xo.sendIcinga2Status({
status: success ? 'OK' : 'CRITICAL',
message: markdown,
}),
])
}
@@ -724,7 +708,7 @@ class BackupReportsXoPlugin {
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}
@@ -920,7 +904,7 @@ class BackupReportsXoPlugin {
? ICON_FAILURE
: ICON_SKIPPED
}`,
success: globalSuccess,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${

View File

@@ -1,4 +1,8 @@
# xo-server-web-hooks [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
# xo-server-cloud [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
## Install
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Usage
@@ -26,7 +30,7 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
Contributions are *very* welcomed, either on the documentation or on
the code.
You may:

View File

@@ -1,21 +1,20 @@
{
"name": "xo-server-web-hooks",
"version": "0.1.0",
"license": "AGPL-3.0",
"name": "xo-server-cloud",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [
"hooks",
"cloud",
"orchestra",
"plugin",
"web",
"xen",
"xen-orchestra",
"xo-server"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-web-hooks",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-web-hooks",
"directory": "packages/xo-server-cloud",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -30,25 +29,24 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"http-request-plus": "^0.8.0",
"lodash": "^4.17.15"
"jsonrpc-websocket-client": "^0.5.0"
},
"devDependencies": {
"@babel/cli": "^7.7.0",
"@babel/core": "^7.7.2",
"@babel/plugin-proposal-optional-chaining": "^7.6.0",
"@babel/preset-env": "^7.7.1",
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},

View File

@@ -0,0 +1,208 @@
import Client, { createBackoff } from 'jsonrpc-websocket-client'
import hrp from 'http-request-plus'
const WS_URL = 'ws://localhost:9001'
const HTTP_URL = 'http://localhost:9002'
// ===================================================================
class XoServerCloud {
constructor({ xo }) {
this._xo = xo
// Defined in configure().
this._conf = null
this._key = null
}
configure(configuration) {
this._conf = configuration
}
async load() {
const getResourceCatalog = this._getCatalog.bind(this)
getResourceCatalog.description =
"Get the list of user's available resources"
getResourceCatalog.permission = 'admin'
getResourceCatalog.params = {
filters: { type: 'object', optional: true },
}
const registerResource = ({ namespace }) =>
this._registerResource(namespace)
registerResource.description = 'Register a resource via cloud plugin'
registerResource.params = {
namespace: {
type: 'string',
},
}
registerResource.permission = 'admin'
const downloadAndInstallResource = this._downloadAndInstallResource.bind(
this
)
downloadAndInstallResource.description =
'Download and install a resource via cloud plugin'
downloadAndInstallResource.params = {
id: { type: 'string' },
namespace: { type: 'string' },
version: { type: 'string' },
sr: { type: 'string' },
}
downloadAndInstallResource.resolve = {
sr: ['sr', 'SR', 'administrate'],
}
downloadAndInstallResource.permission = 'admin'
this._unsetApiMethods = this._xo.addApiMethods({
cloud: {
downloadAndInstallResource,
getResourceCatalog,
registerResource,
},
})
this._unsetRequestResource = this._xo.defineProperty(
'requestResource',
this._requestResource,
this
)
const updater = (this._updater = new Client(WS_URL))
const connect = () =>
updater.open(createBackoff()).catch(error => {
console.error('xo-server-cloud: fail to connect to updater', error)
return connect()
})
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
console.warn('xo-server-cloud: next attempt in %s ms', delay)
})
connect()
}
unload() {
this._unsetApiMethods()
this._unsetRequestResource()
}
// ----------------------------------------------------------------
async _getCatalog({ filters } = {}) {
const catalog = await this._updater.call('getResourceCatalog', { filters })
if (!catalog) {
throw new Error('cannot get catalog')
}
return catalog
}
// ----------------------------------------------------------------
async _getNamespaces() {
const catalog = await this._getCatalog()
if (!catalog._namespaces) {
throw new Error('cannot get namespaces')
}
return catalog._namespaces
}
// ----------------------------------------------------------------
async _downloadAndInstallResource({ id, namespace, sr, version }) {
const stream = await this._requestResource({
hub: true,
id,
namespace,
version,
})
const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
srId: sr.id,
type: 'xva',
})
await vm.update_other_config({
'xo:resource:namespace': namespace,
'xo:resource:xva:version': version,
'xo:resource:xva:id': id,
})
}
// ----------------------------------------------------------------
async _registerResource(namespace) {
const _namespace = (await this._getNamespaces())[namespace]
if (_namespace === undefined) {
throw new Error(`${namespace} is not available`)
}
if (_namespace.registered || _namespace.pending) {
throw new Error(`already registered for ${namespace}`)
}
return this._updater.call('registerResource', { namespace })
}
// ----------------------------------------------------------------
async _getNamespaceCatalog({ hub, namespace }) {
const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
namespace
]
if (!namespaceCatalog) {
throw new Error(`cannot get catalog: ${namespace} not registered`)
}
return namespaceCatalog
}
// ----------------------------------------------------------------
async _requestResource({ hub = false, id, namespace, version }) {
const _namespace = (await this._getNamespaces())[namespace]
if (!hub && (!_namespace || !_namespace.registered)) {
throw new Error(`cannot get resource: ${namespace} not registered`)
}
const { _token: token } = await this._getNamespaceCatalog({
hub,
namespace,
})
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
if (token === undefined) {
throw new Error(`${namespace} namespace token is undefined`)
}
const downloadToken = await this._updater.call('getResourceDownloadToken', {
token,
id,
version,
})
if (!downloadToken) {
throw new Error('cannot get download token')
}
const response = await hrp(HTTP_URL, {
headers: {
Authorization: `Bearer ${downloadToken}`,
},
})
// currently needed for XenApi#putResource()
response.length = response.headers['content-length']
return response
}
}
export default opts => new XoServerCloud(opts)
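Since `load()` publishes `_requestResource` on the xo object via `defineProperty`, other parts of xo-server can stream a hub resource straight into a pool, exactly as `_downloadAndInstallResource` does internally. A consumer sketch (function and parameter names are assumptions):

```js
async function importHubXva(xo, xapi, { id, namespace, version, srId }) {
  const stream = await xo.requestResource({ hub: true, id, namespace, version })
  // stream.length was set from Content-Length so putResource can size the upload
  return xapi.importVm(stream, { srId, type: 'xva' })
}
```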

View File

@@ -31,7 +31,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"lodash": "^4.16.2"
},
"devDependencies": {

View File

@@ -21,7 +21,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"

View File

@@ -15,9 +15,9 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.3.1",
"version": "0.3.0",
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
@@ -30,7 +30,7 @@
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.101",
"node-openssl-cert": "^0.0.98",
"promise-toolbox": "^0.14.0",
"uuid": "^3.3.2"
},

File diff suppressed because it is too large


View File

@@ -28,7 +28,8 @@ export class OvsdbClient {
Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
- `other_config`:
- `xo:sdn-controller:private-network-uuid`: UUID of the private network
- `xo:sdn-controller:cross-pool`: UUID of the remote network connected by the tunnel
- `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller
Attributes on created OVS interfaces:
- `options`:
@@ -66,49 +67,55 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
network,
networkUuid,
networkName,
remoteAddress,
encapsulation,
key,
password,
privateNetworkUuid
remoteNetwork
) {
if (
this._adding.find(
elem => elem.id === network.uuid && elem.addr === remoteAddress
elem => elem.id === networkUuid && elem.addr === remoteAddress
) !== undefined
) {
return
}
const adding = { id: network.uuid, addr: remoteAddress }
const adding = { id: networkUuid, addr: remoteAddress }
this._adding.push(adding)
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridge,
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return bridge.name
return bridgeName
}
const index = ++this._numberOfPortAndInterface
const interfaceName = bridge.name + '_iface' + index
const portName = bridge.name + '_port' + index
const interfaceName = bridgeName + '_iface' + index
const portName = bridgeName + '_port' + index
// Add interface and port to the bridge
const options = { remote_ip: remoteAddress, key: key }
@@ -132,9 +139,11 @@ export class OvsdbClient {
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: toMap({
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
}),
other_config: toMap(
remoteNetwork !== undefined
? { 'xo:sdn-controller:cross-pool': remoteNetwork }
: { 'xo:sdn-controller:private-pool-wide': 'true' }
),
},
'uuid-name': 'new_port',
}
@@ -142,7 +151,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
@@ -154,7 +163,7 @@ export class OvsdbClient {
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
if (jsonObjects === undefined) {
socket.destroy()
@@ -180,8 +189,8 @@ export class OvsdbClient {
details,
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
@@ -191,24 +200,33 @@ export class OvsdbClient {
log.debug('Port and interface added to bridge', {
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
return bridge.name
return bridgeName
}
async resetForNetwork(network, privateNetworkUuid) {
async resetForNetwork(
networkUuid,
networkName,
crossPoolOnly,
remoteNetwork
) {
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridge, socket)
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
socket.destroy()
return
@@ -232,14 +250,15 @@ export class OvsdbClient {
// 2019-09-03
// Compatibility code, to be removed in 1 year.
const oldShouldDelete =
config[0] === 'private_pool_wide' ||
config[0] === 'cross_pool' ||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
config[0] === 'xo:sdn-controller:cross-pool'
(config[0] === 'private_pool_wide' && !crossPoolOnly) ||
(config[0] === 'cross_pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
const shouldDelete =
config[0] === 'xo:sdn-controller:private-network-uuid' &&
config[1] === privateNetworkUuid
(config[0] === 'xo:sdn-controller:private-pool-wide' &&
!crossPoolOnly) ||
(config[0] === 'xo:sdn-controller:cross-pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
if (shouldDelete || oldShouldDelete) {
portsToDelete.push(['uuid', portUuid])
@@ -256,7 +275,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
@@ -269,7 +288,7 @@ export class OvsdbClient {
if (jsonObjects[0].error != null) {
log.error('Error while deleting ports from bridge', {
error: jsonObjects[0].error,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -278,7 +297,7 @@ export class OvsdbClient {
log.debug('Ports deleted from bridge', {
nPorts: jsonObjects[0].result[0].count,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -316,9 +335,9 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _getBridgeForNetwork(network, socket) {
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
]
const selectResult = await this._select(
'Bridge',
@@ -328,17 +347,25 @@ export class OvsdbClient {
)
if (selectResult === undefined) {
log.error('No bridge found for network', {
network: network.name_label,
network: networkName,
host: this.host.name_label,
})
return {}
return []
}
return { uuid: selectResult._uuid[1], name: selectResult.name }
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
const ports = await this._getBridgePorts(bridge, socket)
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
return false
}
@@ -366,8 +393,8 @@ export class OvsdbClient {
return false
}
async _getBridgePorts(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult === undefined) {
return

View File

@@ -1,202 +0,0 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:private-network')
// =============================================================================
const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
Array.from({ length: 16 }, _ => sample(CHARS)).join('')
// =============================================================================
export class PrivateNetwork {
constructor(controller, uuid) {
this.controller = controller
this.uuid = uuid
this.networks = {}
}
// ---------------------------------------------------------------------------
async addHost(host) {
if (host.$ref === this.center?.$ref) {
// Nothing to do
return
}
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
log.error('No OVSDB client found', {
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
const centerClient = this.controller.ovsdbClients[this.center.$ref]
if (centerClient === undefined) {
log.error('No OVSDB client found for star-center', {
privateNetwork: this.uuid,
host: this.center.name_label,
pool: this.center.$pool.name_label,
})
return
}
const network = this.networks[host.$pool.uuid]
const centerNetwork = this.networks[this.center.$pool.uuid]
const otherConfig = network.other_config
const encapsulation =
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
const password =
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
])
} catch (error) {
log.error('Error while connecting host to private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
log.info('Host added', {
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return bridgeName
}
addNetwork(network) {
this.networks[network.$pool.uuid] = network
log.info('Adding network', {
privateNetwork: this.uuid,
network: network.name_label,
pool: network.$pool.name_label,
})
if (this.center === undefined) {
return this.electNewCenter()
}
const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
return Promise.all(
map(hosts, async host => {
const hostClient = this.controller.ovsdbClients[host.$ref]
const network = this.networks[host.$pool.uuid]
await hostClient.resetForNetwork(network, this.uuid)
await this.addHost(host)
})
)
}
async electNewCenter() {
delete this.center
// TODO: make it random
const hosts = this._getHosts()
for (const host of hosts) {
const pif = find(host.$PIFs, {
network: this.networks[host.$pool.uuid].$ref,
})
if (pif?.currently_attached && host.$metrics.live) {
this.center = host
break
}
}
if (this.center === undefined) {
log.error('No available host to elect new star-center', {
privateNetwork: this.uuid,
})
return
}
await this._reset()
// Recreate star topology
await Promise.all(map(hosts, host => this.addHost(host)))
log.info('New star-center elected', {
center: this.center.name_label,
privateNetwork: this.uuid,
})
}
// ---------------------------------------------------------------------------
getPools() {
const pools = []
forOwn(this.networks, network => {
pools.push(network.$pool)
})
return pools
}
// ---------------------------------------------------------------------------
_reset() {
return Promise.all(
map(this._getHosts(), async host => {
// Clean old ports and interfaces
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
return
}
const network = this.networks[host.$pool.uuid]
try {
await hostClient.resetForNetwork(network, this.uuid)
} catch (error) {
log.error('Error while resetting private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: network.$pool.name_label,
})
}
})
)
}
// ---------------------------------------------------------------------------
_getHosts() {
const hosts = []
forOwn(this.networks, network => {
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
})
return hosts
}
}

View File

@@ -8,8 +8,5 @@
"directory": "packages/xo-server-test-plugin",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": "*"
}
}

View File

@@ -14,7 +14,6 @@
[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''
[templates]

View File

@@ -154,19 +154,6 @@ class XoConnection extends Xo {
})
}
async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })
return this.waitObjectState(id, vm => {
if (
vm.power_state !== 'Running' ||
(withXenTools && vm.xenTools === false)
) {
throw new Error('retry')
}
})
}
async createTempRemote(params) {
const remote = await this.call('remote.create', params)
this._tempResourceDisposers.push('remote.delete', { id: remote.id })

View File

@@ -55,68 +55,6 @@ Object {
}
`;
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {

View File

@@ -584,110 +584,4 @@ describe('backupNg', () => {
})
})
})
test('create and execute backup with enabled offline backup', async () => {
const vm = xo.objects.all[config.vms.withOsAndXenTools]
if (vm.power_state !== 'Running') {
await xo.startTempVm(vm.id, { force: true }, true)
}
const scheduleTempId = randomId()
const srId = config.srs.default
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const backupInput = {
mode: 'full',
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
offlineBackup: true,
},
[scheduleTempId]: {
copyRetention: 1,
exportRetention: 1,
},
},
srs: {
id: srId,
},
vms: {
id: vm.id,
},
}
const backup = await xo.createTempBackupNgJob(backupInput)
expect(backup.settings[''].offlineBackup).toBe(true)
const schedule = await xo.getSchedule({ jobId: backup.id })
await Promise.all([
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Halted') {
throw new Error('retry')
}
}),
])
await xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Running') {
throw new Error('retry')
}
})
const backupLogs = await xo.getBackupLogs({
jobId: backup.id,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(1)
const { tasks, ...log } = backupLogs[0]
validateRootTask(log, {
data: {
mode: backupInput.mode,
reportWhen: backupInput.settings[''].reportWhen,
},
jobId: backup.id,
jobName: backupInput.name,
scheduleId: schedule.id,
status: 'success',
})
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...vmTask }) => {
validateVmTask(vmTask, vm.id, { status: 'success' })
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...subTask }) => {
expect(subTask.message).not.toBe('snapshot')
if (subTask.message === 'export') {
validateExportTask(
subTask,
subTask.data.type === 'remote' ? remoteId : srId,
{
data: expect.any(Object),
status: 'success',
}
)
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(operationTask => {
if (
operationTask.message === 'transfer' ||
operationTask.message === 'merge'
) {
validateOperationTask(operationTask, {
result: { size: expect.any(Number) },
status: 'success',
})
}
})
}
})
})
}, 200e3)
})

View File

@@ -60,16 +60,14 @@ describe('server', () => {
autoConnect: false,
})
expect(
(
await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)
).message
(await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)).message
).toBe('unknown error from the peer')
})
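These test hunks are pure prettier reflows of the same pattern: `rejectionOf` inverts a promise so the expected rejection becomes the resolved value. A minimal sketch of such a helper, assuming that is all it does:

```js
const rejectionOf = promise =>
  promise.then(
    value => {
      // resolving is the failure case: the test expected a rejection
      throw value
    },
    reason => reason
  )

// usage, as in the test above:
// const error = await rejectionOf(addServer({ host: 'xen1.example.org', ... }))
// expect(error.message).toBe('unknown error from the peer')
```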

View File

@@ -60,15 +60,13 @@ describe('cd', () => {
await getOrWaitCdVbdPosition(vmId)
expect(
(
await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)).message
).toBe('unknown error from the peer')
})

View File

@@ -126,14 +126,12 @@ describe('the VM life cyle', () => {
})
expect(
(
await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)).message
).toBe('VM lacks feature shutdown')
})
@@ -198,14 +196,12 @@ describe('the VM life cyle', () => {
})
expect(
(
await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)).message
).toBe('clean shutdown requires PV drivers')
})

View File

@@ -1,53 +0,0 @@
# xo-server-transport-icinga2 [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
> xo-server plugin to send status to icinga2 server
## Install
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Usage
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
## Development
### `Xo#sendIcinga2Status({ status, message })`
This xo method is called to send a passive check to icinga2 and change the status of a service.
It has two parameters:
- status: the service status, one of `OK`, `WARNING`, `CRITICAL` or `UNKNOWN`, mapped to icinga2 exit statuses 0-3.
- message: the status information shown in icinga2 (sent as `plugin_output`).
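A call site might look like this (hypothetical wiring; it assumes the plugin is loaded so that `sendIcinga2Status` has been defined on the `xo` instance):

```js
// status is one of OK/WARNING/CRITICAL/UNKNOWN; message becomes plugin_output
await xo.sendIcinga2Status({
  status: 'WARNING',
  message: 'xo-backup: 1 VM skipped',
})
```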
```
# Install dependencies
> npm install
# Run the tests
> npm test
# Continuously compile
> npm run dev
# Continuously run the tests
> npm run dev-test
# Build for production (automatically called by npm install)
> npm run build
```
## Contributions
Contributions are *very* welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
AGPL3 © [Vates SAS](https://vates.fr)

View File

@@ -1,32 +0,0 @@
{
  "name": "xo-server-transport-icinga2",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-transport-icinga2",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "packages/xo-server-transport-icinga2",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "main": "./dist",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "version": "0.1.0",
  "engines": {
    "node": ">=8.9.4"
  },
  "devDependencies": {
    "@babel/cli": "^7.4.4",
    "@babel/core": "^7.4.4",
    "@babel/preset-env": "^7.4.4",
    "cross-env": "^6.0.3"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.2.0"
  },
  "private": true
}

View File

@@ -1,136 +0,0 @@
import assert from 'assert'
import { URL } from 'url'
// =============================================================================
export const configurationSchema = {
type: 'object',
properties: {
server: {
type: 'string',
description: `
The icinga2 server http/https address.
*If no port is provided in the URL, 5665 will be used.*
Examples:
- https://icinga2.example.com
- http://192.168.0.1:1234
`.trim(),
},
user: {
type: 'string',
description: 'The icinga2 server username',
},
password: {
type: 'string',
description: 'The icinga2 server password',
},
filter: {
type: 'string',
description: `
The filter to use
See: https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/#filters
Example:
- Monitor the backup jobs of the VMs of a specific host:
\`host.name=="xoa.example.com" && service.name=="xo-backup"\`
`.trim(),
},
acceptUnauthorized: {
type: 'boolean',
description: 'Accept unauthorized certificates',
default: false,
},
},
additionalProperties: false,
required: ['server'],
}
// =============================================================================
const STATUS_MAP = {
OK: 0,
WARNING: 1,
CRITICAL: 2,
UNKNOWN: 3,
}
// =============================================================================
class XoServerIcinga2 {
constructor({ xo }) {
this._xo = xo
}
// ---------------------------------------------------------------------------
configure(configuration) {
const serverUrl = new URL(configuration.server)
if (configuration.user !== '') {
serverUrl.username = configuration.user
}
if (configuration.password !== '') {
serverUrl.password = configuration.password
}
if (serverUrl.port === '') {
serverUrl.port = '5665' // Default icinga2 access port
}
serverUrl.pathname = '/v1/actions/process-check-result'
this._url = serverUrl.href
this._filter =
configuration.filter !== undefined ? configuration.filter : ''
this._acceptUnauthorized = configuration.acceptUnauthorized
}
load() {
this._unset = this._xo.defineProperty(
'sendIcinga2Status',
this._sendIcinga2Status,
this
)
}
unload() {
this._unset()
}
test() {
return this._sendIcinga2Status({
message:
'The server-icinga2 plugin for Xen Orchestra server seems to be working fine, nicely done :)',
status: 'OK',
})
}
// ---------------------------------------------------------------------------
_sendIcinga2Status({ message, status }) {
const icinga2Status = STATUS_MAP[status]
assert(icinga2Status !== undefined, `Invalid icinga2 status: ${status}`)
return this._xo
.httpRequest(this._url, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
},
rejectUnauthorized: !this._acceptUnauthorized,
body: JSON.stringify({
type: 'Service',
filter: this._filter,
plugin_output: message,
exit_status: icinga2Status,
}),
})
.readAll()
}
}
// =============================================================================
export default opts => new XoServerIcinga2(opts)
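Concretely, `_sendIcinga2Status` POSTs a passive check result to icinga2's `/v1/actions/process-check-result` endpoint; the JSON body looks along these lines (values illustrative):

```js
// Shape of the request body built above
const body = {
  type: 'Service',
  filter: 'host.name=="xoa.example.com" && service.name=="xo-backup"',
  plugin_output: 'xo-backup: everything went fine',
  exit_status: 0, // STATUS_MAP.OK
}
```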

View File

@@ -35,6 +35,11 @@ export const configurationSchema = {
// ===================================================================
const bind = (fn, thisArg) =>
function __bound__() {
return fn.apply(thisArg, arguments)
}
function nscaPacketBuilder({ host, iv, message, service, status, timestamp }) {
// Building NSCA packet
const SIZE = 720
@@ -77,8 +82,8 @@ const ENCODING = 'binary'
class XoServerNagios {
constructor({ xo }) {
this._sendPassiveCheck = this._sendPassiveCheck.bind(this)
this._set = xo.defineProperty.bind(xo)
this._sendPassiveCheck = bind(this._sendPassiveCheck, this)
this._set = bind(xo.defineProperty, xo)
this._unset = null
// Defined in configure().

View File

@@ -36,7 +36,7 @@
},
"dependencies": {
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/log": "^0.2.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",

View File

@@ -1,3 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,160 +0,0 @@
import createLogger from '@xen-orchestra/log'
const log = createLogger('xo:web-hooks')
function handleHook(type, data) {
const hooks = this._hooks[data.method]?.[type]
if (hooks !== undefined) {
return Promise.all(
hooks.map(({ url }) =>
this._makeRequest(url, type, data).catch(error => {
log.error('web hook failed', {
error,
webHook: { ...data, url, type },
})
})
)
)
}
}
class XoServerHooks {
constructor({ xo }) {
this._xo = xo
// Defined in configure().
this._hooks = null
this._handlePreHook = handleHook.bind(this, 'pre')
this._handlePostHook = handleHook.bind(this, 'post')
}
_makeRequest(url, type, data) {
return this._xo.httpRequest(url, {
body: JSON.stringify({ ...data, type }),
headers: { 'Content-Type': 'application/json' },
method: 'POST',
onRequest: req => {
req.setTimeout(1e4)
req.on('timeout', req.abort)
},
})
}
configure(configuration) {
// this._hooks = {
// 'vm.start': {
// pre: [
// {
// method: 'vm.start',
// type: 'pre',
// url: 'https://my-domain.net/xo-hooks?action=vm.start'
// },
// ...
// ],
// post: [
// ...
// ]
// },
// ...
// }
const hooks = {}
for (const hook of configuration.hooks) {
if (hooks[hook.method] === undefined) {
hooks[hook.method] = {}
}
hook.type.split('/').forEach(type => {
if (hooks[hook.method][type] === undefined) {
hooks[hook.method][type] = []
}
hooks[hook.method][type].push(hook)
})
}
this._hooks = hooks
}
load() {
this._xo.on('xo:preCall', this._handlePreHook)
this._xo.on('xo:postCall', this._handlePostHook)
}
unload() {
this._xo.removeListener('xo:preCall', this._handlePreHook)
this._xo.removeListener('xo:postCall', this._handlePostHook)
}
async test({ url }) {
await this._makeRequest(url, 'pre', {
callId: '0',
userId: 'b4tm4n',
userName: 'bruce.wayne@waynecorp.com',
method: 'vm.start',
params: { id: '67aac198-0174-11ea-8d71-362b9e155667' },
timestamp: 0,
})
await this._makeRequest(url, 'post', {
callId: '0',
userId: 'b4tm4n',
userName: 'bruce.wayne@waynecorp.com',
method: 'vm.start',
result: '',
timestamp: 500,
duration: 500,
})
}
}
export const configurationSchema = ({ xo: { apiMethods } }) => ({
description: 'Bind XO API calls to HTTP requests.',
type: 'object',
properties: {
hooks: {
type: 'array',
title: 'Hooks',
items: {
type: 'object',
title: 'Hook',
properties: {
method: {
description: 'The method to be bound to',
enum: Object.keys(apiMethods).sort(),
title: 'Method',
type: 'string',
},
type: {
description:
'Right before the API call *or* right after the action has been completed',
enum: ['pre', 'post', 'pre/post'],
title: 'Type',
type: 'string',
},
url: {
description: 'The full URL you wish the request to be sent to',
// It would be more convenient to configure 1 URL for multiple
// triggers but the UI implementation is not ideal for such a deep
// configuration schema: https://i.imgur.com/CpvAwPM.png
title: 'URL',
type: 'string',
},
},
required: ['method', 'type', 'url'],
},
},
},
required: ['hooks'],
})
export const testSchema = {
type: 'object',
description:
'The test will simulate a hook on `vm.start` (both "pre" and "post" hooks)',
properties: {
url: {
title: 'URL',
type: 'string',
description: 'The URL the test request will be sent to',
},
},
}
export default opts => new XoServerHooks(opts)
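On the receiving side, any endpoint that accepts a JSON POST will do. A minimal, hypothetical Express receiver (the payload fields mirror the `test()` requests above; Express itself is an assumption, not a dependency of this plugin):

```js
import express from 'express'

const app = express()
app.use(express.json())

// Receives both 'pre' and 'post' notifications sent by _makeRequest()
app.post('/xo-hooks', (req, res) => {
  const { type, method, params, result, error } = req.body
  console.log('hook', type, method, params || result || error)
  res.sendStatus(200)
})

app.listen(3000)
```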

View File

@@ -50,28 +50,15 @@ maxTokenValidity = '0.5 year'
# https://developer.mozilla.org/fr/docs/Web/HTTP/Headers/Set-Cookie#Session_cookie
#sessionCookieValidity = '10 hours'
# This is the page where unauthenticated users will be redirected to.
#
# For instance, it can be changed to `/signin/saml` if that's the provider that
# should be used by default.
defaultSignInPage = '/signin'
[backup]
# Delay for which the backup listing of a remote is cached
listingDebounce = '1 min'
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674
maxMergedDeltasPerRun = 2
# Duration for which we can wait for the backup size before returning
#
# It should be short to avoid blocking the display of the available backups.
vmBackupSizeTimeout = '2 seconds'
poolMetadataTimeout = '10 minutes'
# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/
@@ -107,6 +94,3 @@ timeout = 600e3
# see https:#github.com/vatesfr/xen-orchestra/issues/3419
# useSudo = false
[xapiOptions]
maxUncoalescedVdis = 1

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.53.0",
"version": "5.50.1",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -30,15 +30,15 @@
"bin": "bin"
},
"engines": {
"node": ">=8.10"
"node": ">=8"
},
"dependencies": {
"@iarna/toml": "^2.2.1",
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.10.2",
"@xen-orchestra/fs": "^0.10.1",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/mixin": "^0.0.0",
"ajv": "^6.1.1",
@@ -60,7 +60,7 @@
"deptree": "^1.0.0",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"execa": "^3.2.0",
"execa": "^2.0.5",
"express": "^4.16.2",
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
@@ -115,22 +115,21 @@
"split-lines": "^2.0.0",
"stack-chain": "^2.0.0",
"stoppable": "^1.0.5",
"strict-timeout": "^1.0.0",
"struct-fu": "^1.2.0",
"tar-stream": "^2.0.1",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.7.2",
"vhd-lib": "^0.7.0",
"ws": "^7.1.2",
"xen-api": "^0.27.3",
"xen-api": "^0.27.2",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-remote-parser": "^0.5.0",
"xo-vmdk-to-vhd": "^0.1.8",
"xo-vmdk-to-vhd": "^0.1.7",
"yazl": "^2.4.3"
},
"devDependencies": {

View File

@@ -1,4 +1,5 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { execFile } from 'child_process'
export const read = key => fromCallback(execFile, 'xenstore-read', [key])
export const read = key =>
fromCallback(cb => execFile('xenstore-read', [key], cb))
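Both forms promisify the same call; the change merely trades promise-toolbox's argument-forwarding signature for an explicit callback wrapper. Roughly (a sketch, not the library's actual code):

```js
// fromCallback(fn, ...args) ≈ fromCallback(cb => fn(...args, cb)) ≈
new Promise((resolve, reject) =>
  execFile('xenstore-read', [key], (error, stdout) =>
    error != null ? reject(error) : resolve(stdout)
  )
)
```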

View File

@@ -0,0 +1,36 @@
import iteratee from 'lodash/iteratee'
import pDelay from 'promise-toolbox/delay'
function stopRetry(error) {
this.error = error
// eslint-disable-next-line no-throw-literal
throw this
}
// do not retry on ReferenceError and TypeError which are programmer errors
const defaultMatcher = error =>
!(error instanceof ReferenceError || error instanceof TypeError)
export default async function pRetry(
fn,
{ delay = 1e3, tries = 10, when } = {}
) {
const container = { error: undefined }
const stop = stopRetry.bind(container)
when = when === undefined ? defaultMatcher : iteratee(when)
while (true) {
try {
return await fn(stop)
} catch (error) {
if (error === container) {
throw container.error
}
if (--tries === 0 || !when(error)) {
throw error
}
}
await pDelay(delay)
}
}
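A hedged usage sketch (`readFlakyResource` is a made-up stand-in): retry up to 3 times, half a second apart, but only on connection resets. The object form of `when` goes through lodash's `iteratee` and becomes a property matcher:

```js
const data = await pRetry(() => readFlakyResource(), {
  delay: 500,
  tries: 3,
  when: { code: 'ECONNRESET' }, // ≈ error => error.code === 'ECONNRESET'
})
```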

View File

@@ -0,0 +1,95 @@
/* eslint-env jest */
import { forOwn } from 'lodash'
import pRetry from './_pRetry'
describe('pRetry()', () => {
it('retries until the function succeeds', async () => {
let i = 0
expect(
await pRetry(
() => {
if (++i < 3) {
throw new Error()
}
return 'foo'
},
{ delay: 0 }
)
).toBe('foo')
expect(i).toBe(3)
})
it('returns the last error', async () => {
let tries = 5
const e = new Error()
await expect(
pRetry(
() => {
throw --tries > 0 ? new Error() : e
},
{ delay: 0, tries }
)
).rejects.toBe(e)
})
;[ReferenceError, TypeError].forEach(ErrorType => {
it(`does not retry if a ${ErrorType.name} is thrown`, async () => {
let i = 0
await expect(
pRetry(() => {
++i
throw new ErrorType()
})
).rejects.toBeInstanceOf(ErrorType)
expect(i).toBe(1)
})
})
it('does not retry if `stop` callback is called', async () => {
const e = new Error()
let i = 0
await expect(
pRetry(stop => {
++i
stop(e)
})
).rejects.toBe(e)
expect(i).toBe(1)
})
describe('`when` option', () => {
forOwn(
{
'with function predicate': _ => _.message === 'foo',
'with object predicate': { message: 'foo' },
},
(when, title) =>
describe(title, () => {
it('retries when error matches', async () => {
let i = 0
await pRetry(
() => {
++i
throw new Error('foo')
},
{ when, tries: 2 }
).catch(Function.prototype)
expect(i).toBe(2)
})
it('does not retry when error does not match', async () => {
let i = 0
await pRetry(
() => {
++i
throw new Error('bar')
},
{ when, tries: 2 }
).catch(Function.prototype)
expect(i).toBe(1)
})
})
)
})
})

View File

@@ -1,16 +0,0 @@
// waits for all promises to be settled
//
// rejects with the first rejection if any
export const waitAll = async iterable => {
let reason
const onReject = r => {
if (reason === undefined) {
reason = r
}
}
await Promise.all(Array.from(iterable, promise => promise.catch(onReject)))
if (reason !== undefined) {
throw reason
}
}
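The point, compared to `Promise.all`, is that no promise is left dangling: every entry settles before the first rejection is rethrown. A hypothetical call site (`saveA` and friends are made up):

```js
// All three writes run to completion even if one of them fails.
await waitAll([saveA(), saveB(), saveC()])
```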

View File

@@ -168,7 +168,7 @@ runJob.params = {
async function handleGetAllLogs(req, res) {
const logs = await this.getBackupNgLogs()
res.set('Content-Type', 'application/json')
return fromCallback(pipeline, createNdJsonStream(logs), res)
return fromCallback(cb => pipeline(createNdJsonStream(logs), res, cb))
}
export function getAllLogs({ ndjson = false }) {
@@ -225,14 +225,13 @@ deleteVmBackup.params = {
},
}
export function listVmBackups({ remotes, _forceRefresh }) {
return this.listVmBackupsNg(remotes, _forceRefresh)
export function listVmBackups({ remotes }) {
return this.listVmBackupsNg(remotes)
}
listVmBackups.permission = 'admin'
listVmBackups.params = {
_forceRefresh: { type: 'boolean', optional: true },
remotes: {
type: 'array',
items: {

View File

@@ -1,12 +1,9 @@
import createLogger from '@xen-orchestra/log'
import pump from 'pump'
import convertVmdkToVhdStream from 'xo-vmdk-to-vhd'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors'
import { peekFooterFromVhdStream } from 'vhd-lib'
import { parseSize } from '../utils'
import { VDI_FORMAT_VHD } from '../xapi'
const log = createLogger('xo:disk')
@@ -168,97 +165,3 @@ resize.params = {
resize.resolve = {
vdi: ['id', ['VDI', 'VDI-snapshot'], 'administrate'],
}
async function handleImport(
req,
res,
{ type, name, description, vmdkData, srId, xapi }
) {
req.setTimeout(43200000) // 12 hours
try {
req.length = req.headers['content-length']
let vhdStream, size
if (type === 'vmdk') {
vhdStream = await convertVmdkToVhdStream(
req,
vmdkData.grainLogicalAddressList,
vmdkData.grainFileOffsetList
)
size = vmdkData.capacity
} else if (type === 'vhd') {
vhdStream = req
const footer = await peekFooterFromVhdStream(req)
size = footer.currentSize
} else {
throw new Error(
`Unknown disk type, expected "vhd" or "vmdk", got ${type}`
)
}
const vdi = await xapi.createVdi({
name_description: description,
name_label: name,
size,
sr: srId,
})
try {
await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
res.end(format.response(0, vdi.$id))
} catch (e) {
await xapi.deleteVdi(vdi)
throw e
}
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
}
}
// type is 'vhd' or 'vmdk'
async function importDisk({ sr, type, name, description, vmdkData }) {
return {
$sendTo: await this.registerHttpRequest(handleImport, {
description,
name,
srId: sr._xapiId,
type,
vmdkData,
xapi: this.getXapi(sr),
}),
}
}
export { importDisk as import }
importDisk.params = {
description: { type: 'string', optional: true },
name: { type: 'string' },
sr: { type: 'string' },
type: { type: 'string' },
vmdkData: {
type: 'object',
optional: true,
properties: {
capacity: { type: 'integer' },
grainLogicalAddressList: {
description:
'virtual address of the blocks on the disk (LBA), in order encountered in the VMDK',
type: 'array',
items: {
type: 'integer',
},
},
grainFileOffsetList: {
description:
'offset of the grains in the VMDK file, in order encountered in the VMDK',
optional: true,
type: 'array',
items: {
type: 'integer',
},
},
},
},
}
importDisk.resolve = {
sr: ['sr', 'SR', 'administrate'],
}
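For orientation, `disk.import` does not receive the disk content directly: it registers an HTTP handler and returns its URL in `$sendTo`, and the client then uploads the VHD/VMDK there. A hypothetical client flow (sketch; error handling omitted):

```js
const { $sendTo } = await xo.call('disk.import', {
  name: 'my-disk',
  sr: srId,
  type: 'vhd',
})
// The disk content is then uploaded with an HTTP POST to $sendTo;
// the response carries the JSON-RPC result with the new VDI id (see handleImport).
```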

View File

@@ -1,12 +1,10 @@
// TODO: Prevent token connections from creating tokens.
// TODO: Token permission.
export async function create({ expiresIn }) {
return (
await this.createAuthenticationToken({
expiresIn,
userId: this.session.get('user_id'),
})
).id
return (await this.createAuthenticationToken({
expiresIn,
userId: this.session.get('user_id'),
})).id
}
create.description = 'create a new authentication token'

View File

@@ -38,15 +38,14 @@ exportConfig.permission = 'admin'
function handleGetAllObjects(req, res, { filter, limit }) {
const objects = this.getObjects({ filter, limit })
res.set('Content-Type', 'application/json')
return fromCallback(pipeline, createNdJsonStream(objects), res)
return fromCallback(cb => pipeline(createNdJsonStream(objects), res, cb))
}
export function getAllObjects({ filter, limit, ndjson = false }) {
return ndjson
? this.registerHttpRequest(handleGetAllObjects, {
filter,
limit,
}).then($getFrom => ({ $getFrom }))
? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then(
$getFrom => ({ $getFrom })
)
: this.getObjects({ filter, limit })
}

View File

@@ -906,9 +906,10 @@ async function createNewDisk(xapi, sr, vm, diskSize) {
async function mountNewDisk(localEndpoint, hostname, newDeviceFiledeviceFile) {
const brickRootCmd =
'bash -c \'mkdir -p /bricks; for TESTVAR in {1..9}; do TESTDIR="/bricks/xosan$TESTVAR" ;if mkdir $TESTDIR; then echo $TESTDIR; exit 0; fi ; done ; exit 1\''
const newBrickRoot = (
await remoteSsh(localEndpoint, brickRootCmd)
).stdout.trim()
const newBrickRoot = (await remoteSsh(
localEndpoint,
brickRootCmd
)).stdout.trim()
const brickName = `${hostname}:${newBrickRoot}/xosandir`
const mountBrickCmd = `mkfs.xfs -i size=512 ${newDeviceFiledeviceFile}; mkdir -p ${newBrickRoot}; echo "${newDeviceFiledeviceFile} ${newBrickRoot} xfs defaults 0 0" >> /etc/fstab; mount -a`
await remoteSsh(localEndpoint, mountBrickCmd)
@@ -960,12 +961,10 @@ async function replaceBrickOnSameVM(
.split('/')
.slice(0, 3)
.join('/')
const previousBrickDevice = (
await remoteSsh(
localEndpoint,
`grep " ${previousBrickRoot} " /proc/mounts | cut -d ' ' -f 1 | sed 's_/dev/__'`
)
).stdout.trim()
const previousBrickDevice = (await remoteSsh(
localEndpoint,
`grep " ${previousBrickRoot} " /proc/mounts | cut -d ' ' -f 1 | sed 's_/dev/__'`
)).stdout.trim()
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 1 }
const brickName = await mountNewDisk(
localEndpoint,
@@ -1181,10 +1180,7 @@ async function _importGlusterVM(xapi, template, lvmsrId) {
}
function _findAFreeIPAddress(nodes, networkPrefix) {
return _findIPAddressOutsideList(
map(nodes, n => n.vm.ip),
networkPrefix
)
return _findIPAddressOutsideList(map(nodes, n => n.vm.ip), networkPrefix)
}
function _findIPAddressOutsideList(

View File

@@ -1,6 +1,7 @@
import appConf from 'app-conf'
import assert from 'assert'
import authenticator from 'otplib/authenticator'
import bind from 'lodash/bind'
import blocked from 'blocked'
import compression from 'compression'
import createExpress from 'express'
@@ -15,12 +16,10 @@ import serveStatic from 'serve-static'
import stoppable from 'stoppable'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'
import { forOwn, map } from 'lodash'
import { URL } from 'url'
import { compile as compilePug } from 'pug'
import { createServer as createProxyServer } from 'http-proxy'
import { fromCallback, fromEvent } from 'promise-toolbox'
import { fromEvent } from 'promise-toolbox'
import { ifDef } from '@xen-orchestra/defined'
import { join as joinPath } from 'path'
@@ -28,9 +27,9 @@ import JsonRpcPeer from 'json-rpc-peer'
import { invalidCredentials } from 'xo-common/api-errors'
import { ensureDir, readdir, readFile } from 'fs-extra'
import ensureArray from './_ensureArray'
import parseDuration from './_parseDuration'
import Xo from './xo'
import { forEach, mapToArray, pFromCallback } from './utils'
import bodyParser from 'body-parser'
import connectFlash from 'connect-flash'
@@ -73,7 +72,7 @@ async function loadConfiguration() {
log.info('Configuration loaded.')
// Print a message if deprecated entries are specified.
DEPRECATED_ENTRIES.forEach(entry => {
forEach(DEPRECATED_ENTRIES, entry => {
if (has(config, entry)) {
log.warn(`${entry} configuration is deprecated.`)
}
@@ -237,7 +236,7 @@ async function setUpPassport(express, xo, { authentication: authCfg }) {
next()
} else {
req.flash('return-url', url)
return res.redirect(authCfg.defaultSignInPage)
return res.redirect('/signin')
}
})
@@ -267,15 +266,16 @@ async function registerPlugin(pluginPath, pluginName) {
})()
// Supports both “normal” CommonJS and Babel's ES2015 modules.
let {
const {
default: factory = plugin,
configurationSchema,
configurationPresets,
testSchema,
} = plugin
let instance
const handleFactory = factory =>
// The default export can be either a factory or directly a plugin
// instance.
const instance =
typeof factory === 'function'
? factory({
xo: this,
@@ -285,17 +285,6 @@ async function registerPlugin(pluginPath, pluginName) {
},
})
: factory
;[
instance,
configurationSchema,
configurationPresets,
testSchema,
] = await Promise.all([
handleFactory(factory),
handleFactory(configurationSchema),
handleFactory(configurationPresets),
handleFactory(testSchema),
])
await this.registerPlugin(
pluginName,
@@ -336,7 +325,7 @@ async function registerPluginsInPath(path) {
})
await Promise.all(
files.map(name => {
mapToArray(files, name => {
if (name.startsWith(PLUGIN_PREFIX)) {
return registerPluginWrapper.call(
this,
@@ -350,9 +339,9 @@ async function registerPluginsInPath(path) {
async function registerPlugins(xo) {
await Promise.all(
[`${__dirname}/../node_modules/`, '/usr/local/lib/node_modules/'].map(
registerPluginsInPath,
xo
mapToArray(
[`${__dirname}/../node_modules/`, '/usr/local/lib/node_modules/'],
xo::registerPluginsInPath
)
)
}
@@ -406,7 +395,7 @@ async function createWebServer({ listen, listenOptions }) {
const webServer = stoppable(new WebServer())
await Promise.all(
map(listen, opts =>
mapToArray(listen, opts =>
makeWebServerListen(webServer, { ...listenOptions, ...opts })
)
)
@@ -424,21 +413,7 @@ const setUpProxies = (express, opts, xo) => {
const proxy = createProxyServer({
changeOrigin: true,
ignorePath: true,
}).on('error', (error, req, res) => {
// `res` can be either a `ServerResponse` or a `Socket` (which does not have
// `writeHead`)
if (!res.headersSent && typeof res.writeHead === 'function') {
res.writeHead(500, { 'content-type': 'text/plain' })
res.write('There was a problem proxying this request.')
}
res.end()
const { method, url } = req
log.error('failed to proxy request', {
error,
req: { method, url },
})
})
}).on('error', error => console.error(error))
// TODO: sort proxies by descending prefix length.
@@ -451,8 +426,6 @@ const setUpProxies = (express, opts, xo) => {
const target = opts[prefix]
proxy.web(req, res, {
agent:
new URL(target).hostname === 'localhost' ? undefined : xo.httpAgent,
target: target + url.slice(prefix.length),
})
@@ -467,7 +440,7 @@ const setUpProxies = (express, opts, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
express.on('upgrade', (req, socket, head) => {
const { url } = req
@@ -477,8 +450,6 @@ const setUpProxies = (express, opts, xo) => {
const target = opts[prefix]
proxy.ws(req, socket, head, {
agent:
new URL(target).hostname === 'localhost' ? undefined : xo.httpAgent,
target: target + url.slice(prefix.length),
})
@@ -491,8 +462,12 @@ const setUpProxies = (express, opts, xo) => {
// ===================================================================
const setUpStaticFiles = (express, opts) => {
forOwn(opts, (paths, url) => {
ensureArray(paths).forEach(path => {
forEach(opts, (paths, url) => {
if (!Array.isArray(paths)) {
paths = [paths]
}
forEach(paths, path => {
log.info(`Setting up ${url}${path}`)
express.use(url, serveStatic(path))
@@ -508,7 +483,7 @@ const setUpApi = (webServer, xo, config) => {
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
const onConnection = (socket, upgradeReq) => {
const { remoteAddress } = upgradeReq.socket
@@ -527,7 +502,7 @@ const setUpApi = (webServer, xo, config) => {
return xo.callApiMethod(connection, message.method, message.params)
}
})
connection.notify = jsonRpc.notify.bind(jsonRpc)
connection.notify = bind(jsonRpc.notify, jsonRpc)
// Close the XO connection with this WebSocket.
socket.once('close', () => {
@@ -540,8 +515,7 @@ const setUpApi = (webServer, xo, config) => {
socket.on('message', message => {
const expiration = connection.get('expiration', undefined)
if (expiration !== undefined && expiration < Date.now()) {
connection.close()
return
return void connection.close()
}
jsonRpc.write(message)
@@ -577,7 +551,7 @@ const setUpConsoleProxy = (webServer, xo) => {
const webSocketServer = new WebSocket.Server({
noServer: true,
})
xo.on('stop', () => fromCallback.call(webSocketServer, 'close'))
xo.on('stop', () => pFromCallback(cb => webSocketServer.close(cb)))
webServer.on('upgrade', async (req, socket, head) => {
const matches = CONSOLE_PROXY_PATH_RE.exec(req.url)
@@ -670,7 +644,7 @@ export default async function main(args) {
const xo = new Xo(config)
// Register web server close on XO stop.
xo.on('stop', () => fromCallback.call(webServer, 'stop'))
xo.on('stop', () => pFromCallback(cb => webServer.stop(cb)))
// Connects to all registered servers.
await xo.start()
@@ -683,7 +657,7 @@ export default async function main(args) {
if (config.http.redirectToHttps) {
let port
forOwn(config.http.listen, listen => {
forEach(config.http.listen, listen => {
if (listen.port && (listen.cert || listen.certificate)) {
port = listen.port
return false
@@ -707,7 +681,7 @@ export default async function main(args) {
setUpConsoleProxy(webServer, xo)
// Must be set up before the API.
express.use(xo._handleHttpRequest.bind(xo))
express.use(bind(xo._handleHttpRequest, xo))
// Everything above is not protected by the sign in, allowing xo-cli
// to work properly.
@@ -735,7 +709,7 @@ export default async function main(args) {
//
// TODO: implement a timeout? (or maybe it is the services launcher
// responsibility?)
;['SIGINT', 'SIGTERM'].forEach(signal => {
forEach(['SIGINT', 'SIGTERM'], signal => {
let alreadyCalled = false
process.on(signal, () => {
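For reference, `fromCallback.call(obj, 'method')` on the other side of this diff passes a method name plus a `this` context, while `pFromCallback(cb => obj.method(cb))` spells the same thing out explicitly. Both expand to roughly:

```js
// ≈ what either form resolves to for `webServer.stop`
new Promise((resolve, reject) =>
  webServer.stop((error, result) => (error != null ? reject(error) : resolve(result)))
)
```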

View File

@@ -19,10 +19,7 @@ describe('mergeObjects', function() {
{ b: 2, c: 3 },
{ d: 4, e: 5, f: 6 },
],
'One set': [
{ a: 1, b: 2 },
{ a: 1, b: 2 },
],
'One set': [{ a: 1, b: 2 }, { a: 1, b: 2 }],
'Empty set': [{ a: 1 }, { a: 1 }, {}],
'All empty': [{}, {}, {}],
'No set': [{}],
@@ -47,52 +44,28 @@ describe('crossProduct', function() {
{
'2 sets of 2 items to multiply': [
[10, 14, 15, 21],
[
[2, 3],
[5, 7],
],
[[2, 3], [5, 7]],
multiplyTest,
],
'3 sets of 2 items to multiply': [
[110, 130, 154, 182, 165, 195, 231, 273],
[
[2, 3],
[5, 7],
[11, 13],
],
[[2, 3], [5, 7], [11, 13]],
multiplyTest,
],
'2 sets of 3 items to multiply': [
[14, 22, 26, 21, 33, 39, 35, 55, 65],
[
[2, 3, 5],
[7, 11, 13],
],
[[2, 3, 5], [7, 11, 13]],
multiplyTest,
],
'2 sets of 2 items to add': [
[7, 9, 8, 10],
[
[2, 3],
[5, 7],
],
addTest,
],
'2 sets of 2 items to add': [[7, 9, 8, 10], [[2, 3], [5, 7]], addTest],
'3 sets of 2 items to add': [
[18, 20, 20, 22, 19, 21, 21, 23],
[
[2, 3],
[5, 7],
[11, 13],
],
[[2, 3], [5, 7], [11, 13]],
addTest,
],
'2 sets of 3 items to add': [
[9, 13, 15, 10, 14, 16, 12, 16, 18],
[
[2, 3, 5],
[7, 11, 13],
],
[[2, 3, 5], [7, 11, 13]],
addTest,
],
},
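For orientation, `crossProduct` feeds every combination drawn from the input sets to the given reducer (here `multiplyTest`/`addTest`). A generic sketch that reproduces the expected values above, though not necessarily this repo's implementation:

```js
// crossProduct([[2, 3], [5, 7]], (a, b) => a * b) → [10, 14, 15, 21]
const crossProduct = (sets, fn) =>
  sets
    .reduce(
      (combos, set) =>
        [].concat(...combos.map(combo => set.map(value => [...combo, value]))),
      [[]]
    )
    .map(combo => combo.reduce(fn))
```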

Some files were not shown because too many files have changed in this diff.