Compare commits
11 Commits
correct-ne
...
test-resto
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
50704b8404 | ||
|
|
341cdf6cba | ||
|
|
ced02de429 | ||
|
|
1745d61c1d | ||
|
|
fac2e6845c | ||
|
|
fd7eddb68c | ||
|
|
23fb486a40 | ||
|
|
9114aa5b11 | ||
|
|
fdb3368b44 | ||
|
|
c0949e9aef | ||
|
|
ae9b4126b1 |
11
.eslintrc.js
11
.eslintrc.js
@@ -21,7 +21,7 @@ module.exports = {
|
||||
|
||||
overrides: [
|
||||
{
|
||||
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
|
||||
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
|
||||
rules: {
|
||||
'no-console': 'off',
|
||||
},
|
||||
@@ -38,15 +38,6 @@ module.exports = {
|
||||
// disabled because XAPI objects are using camel case
|
||||
camelcase: ['off'],
|
||||
|
||||
'react/jsx-handler-names': 'off',
|
||||
|
||||
// disabled because not always relevant, we might reconsider in the future
|
||||
//
|
||||
// enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
|
||||
//
|
||||
// example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
|
||||
'lines-between-class-members': 'off',
|
||||
|
||||
'no-console': ['error', { allow: ['warn', 'error'] }],
|
||||
'no-var': 'error',
|
||||
'node/no-extraneous-import': 'error',
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -8,8 +8,5 @@
|
||||
"directory": "@xen-orchestra/babel-config",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
const getopts = require('getopts')
|
||||
|
||||
const { version } = require('./package.json')
|
||||
|
||||
module.exports = commands =>
|
||||
async function(args, prefix) {
|
||||
const opts = getopts(args, {
|
||||
alias: {
|
||||
help: 'h',
|
||||
},
|
||||
boolean: ['help'],
|
||||
stopEarly: true,
|
||||
})
|
||||
|
||||
const commandName = opts.help || args.length === 0 ? 'help' : args[0]
|
||||
const command = commands[commandName]
|
||||
if (command === undefined) {
|
||||
process.stdout.write(`Usage:
|
||||
|
||||
${Object.keys(commands)
|
||||
.filter(command => command !== 'help')
|
||||
.map(command => ` ${prefix} ${command} ${commands[command].usage || ''}`)
|
||||
.join('\n\n')}
|
||||
|
||||
xo-backups v${version}
|
||||
`)
|
||||
process.exitCode = commandName === 'help' ? 0 : 1
|
||||
return
|
||||
}
|
||||
|
||||
return command.main(args.slice(1), prefix + ' ' + commandName)
|
||||
}
|
||||
@@ -1,393 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
// assigned when options are parsed by the main function
|
||||
let force
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const assert = require('assert')
|
||||
const getopts = require('getopts')
|
||||
const lockfile = require('proper-lockfile')
|
||||
const { default: Vhd } = require('vhd-lib')
|
||||
const { curryRight, flatten } = require('lodash')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
|
||||
const { pipe, promisifyAll } = require('promise-toolbox')
|
||||
|
||||
const fs = promisifyAll(require('fs'))
|
||||
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const asyncMap = curryRight((iterable, fn) =>
|
||||
Promise.all(
|
||||
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
|
||||
)
|
||||
)
|
||||
|
||||
const filter = (...args) => thisArg => thisArg.filter(...args)
|
||||
|
||||
const isGzipFile = async fd => {
|
||||
// https://tools.ietf.org/html/rfc1952.html#page-5
|
||||
const magicNumber = Buffer.allocUnsafe(2)
|
||||
assert.strictEqual(
|
||||
await fs.read(fd, magicNumber, 0, magicNumber.length, 0),
|
||||
magicNumber.length
|
||||
)
|
||||
return magicNumber[0] === 31 && magicNumber[1] === 139
|
||||
}
|
||||
|
||||
// TODO: better check?
|
||||
//
|
||||
// our heuristic is not good enough, there has been some false positives
|
||||
// (detected as invalid by us but valid by `tar` and imported with success),
|
||||
// either THOUGH THEY MAY HAVE BEEN COMPRESSED FILES:
|
||||
// - these files were normal but the check is incorrect
|
||||
// - these files were invalid but without data loss
|
||||
// - these files were invalid but with silent data loss
|
||||
//
|
||||
// maybe reading the end of the file looking for a file named
|
||||
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
|
||||
//
|
||||
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
|
||||
const isValidTar = async (size, fd) => {
|
||||
if (size <= 1024 || size % 512 !== 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
const buf = Buffer.allocUnsafe(1024)
|
||||
assert.strictEqual(
|
||||
await fs.read(fd, buf, 0, buf.length, size - buf.length),
|
||||
buf.length
|
||||
)
|
||||
return buf.every(_ => _ === 0)
|
||||
}
|
||||
|
||||
// TODO: find an heuristic for compressed files
|
||||
const isValidXva = async path => {
|
||||
try {
|
||||
const fd = await fs.open(path, 'r')
|
||||
try {
|
||||
const { size } = await fs.fstat(fd)
|
||||
if (size < 20) {
|
||||
// neither a valid gzip not tar
|
||||
return false
|
||||
}
|
||||
|
||||
return (await isGzipFile(fd))
|
||||
? true // gzip files cannot be validated at this time
|
||||
: await isValidTar(size, fd)
|
||||
} finally {
|
||||
fs.close(fd).catch(noop)
|
||||
}
|
||||
} catch (error) {
|
||||
// never throw, log and report as valid to avoid side effects
|
||||
console.error('isValidXva', path, error)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const readDir = path =>
|
||||
fs.readdir(path).then(
|
||||
entries => {
|
||||
entries.forEach((entry, i) => {
|
||||
entries[i] = `${path}/${entry}`
|
||||
})
|
||||
|
||||
return entries
|
||||
},
|
||||
error => {
|
||||
// a missing dir is by definition empty
|
||||
if (error != null && error.code === 'ENOENT') {
|
||||
return []
|
||||
}
|
||||
throw error
|
||||
}
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will deleted
|
||||
async function mergeVhdChain(chain) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
const child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
console.warn('Unused parents of VHD', child)
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
console.warn(' ', parent)
|
||||
})
|
||||
force && console.warn(' merging…')
|
||||
console.warn('')
|
||||
if (force) {
|
||||
// `mergeVhd` does not work with a stream, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
return console.warn('TODO: implement merge')
|
||||
// await mergeVhd(
|
||||
// handler,
|
||||
// parent,
|
||||
// handler,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children)
|
||||
// )
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
force && fs.rename(parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
console.warn('Unused VHD', child)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(child)
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
const listVhds = pipe([
|
||||
vmDir => vmDir + '/vdis',
|
||||
readDir,
|
||||
asyncMap(readDir),
|
||||
flatten,
|
||||
asyncMap(readDir),
|
||||
flatten,
|
||||
filter(_ => _.endsWith('.vhd')),
|
||||
])
|
||||
|
||||
async function handleVm(vmDir) {
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(await listVhds(vmDir), async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
|
||||
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error(
|
||||
'this script does not support multiple VHD children'
|
||||
)
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Error while checking VHD', path)
|
||||
console.warn(' ', error)
|
||||
if (error != null && error.code === 'ERR_ASSERTION') {
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(path))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhd => {
|
||||
const parent = vhdParents[vhd]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhd]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhd)
|
||||
|
||||
console.warn('Error while checking VHD', vhd)
|
||||
console.warn(' missing parent', parent)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && deletions.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const [jsons, xvas] = await readDir(vmDir).then(entries => [
|
||||
entries.filter(_ => _.endsWith('.json')),
|
||||
new Set(entries.filter(_ => _.endsWith('.xva'))),
|
||||
])
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await isValidXva(path))) {
|
||||
console.warn('Potential broken XVA', path)
|
||||
console.warn('')
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// reference a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await fs.readFile(json))
|
||||
const { mode } = metadata
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve(vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
console.warn(' missing file', linkedXva)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
|
||||
})()
|
||||
|
||||
// FIXME: find better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
|
||||
console.warn(
|
||||
' %i/%i missing VHDs',
|
||||
missingVhds.length,
|
||||
linkedVhds.length
|
||||
)
|
||||
missingVhds.forEach(vhd => {
|
||||
console.warn(' ', vhd)
|
||||
})
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: parallelize by vm/job/vdi
|
||||
const unusedVhdsDeletion = []
|
||||
{
|
||||
// VHD chains (as list from child to ancestor) to merge indexed by last
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
const toCheck = new Set(unusedVhds)
|
||||
|
||||
const getUsedChildChainOrDelete = vhd => {
|
||||
if (vhd in vhdChainsToMerge) {
|
||||
const chain = vhdChainsToMerge[vhd]
|
||||
delete vhdChainsToMerge[vhd]
|
||||
return chain
|
||||
}
|
||||
|
||||
if (!unusedVhds.has(vhd)) {
|
||||
return [vhd]
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
toCheck.delete(vhd)
|
||||
|
||||
const child = vhdChildren[vhd]
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
console.warn('Unused VHD', vhd)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && unusedVhdsDeletion.push(handler.unlink(vhd))
|
||||
}
|
||||
|
||||
toCheck.forEach(vhd => {
|
||||
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
|
||||
})
|
||||
|
||||
Object.keys(vhdChainsToMerge).forEach(key => {
|
||||
const chain = vhdChainsToMerge[key]
|
||||
if (chain !== undefined) {
|
||||
unusedVhdsDeletion.push(mergeVhdChain(chain))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
unusedVhdsDeletion,
|
||||
asyncMap(unusedXvas, path => {
|
||||
console.warn('Unused XVA', path)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(path)
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
module.exports = async function main(args) {
|
||||
const opts = getopts(args, {
|
||||
alias: {
|
||||
force: 'f',
|
||||
},
|
||||
boolean: ['force'],
|
||||
default: {
|
||||
force: false,
|
||||
},
|
||||
})
|
||||
|
||||
;({ force } = opts)
|
||||
await asyncMap(opts._, async vmDir => {
|
||||
vmDir = resolve(vmDir)
|
||||
|
||||
// TODO: implement this in `xo-server`, not easy because not compatible with
|
||||
// `@xen-orchestra/fs`.
|
||||
const release = await lockfile.lock(vmDir)
|
||||
try {
|
||||
await handleVm(vmDir)
|
||||
} catch (error) {
|
||||
console.error('handleVm', vmDir, error)
|
||||
} finally {
|
||||
await release()
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
require('./_composeCommands')({
|
||||
'clean-vms': {
|
||||
get main() {
|
||||
return require('./commands/clean-vms')
|
||||
},
|
||||
usage: '[--force] xo-vm-backups/*',
|
||||
},
|
||||
})(process.argv.slice(2), 'xo-backups').catch(error => {
|
||||
console.error('main', error)
|
||||
process.exitCode = 1
|
||||
})
|
||||
@@ -1,28 +0,0 @@
|
||||
{
|
||||
"bin": {
|
||||
"xo-backups": "index.js"
|
||||
},
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.10.2",
|
||||
"getopts": "^2.2.5",
|
||||
"lodash": "^4.17.15",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"proper-lockfile": "^4.1.1",
|
||||
"vhd-lib": "^0.7.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=7.10.1"
|
||||
},
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
|
||||
"name": "@xen-orchestra/backups-cli",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/backups-cli",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"version": "0.0.0"
|
||||
}
|
||||
@@ -16,7 +16,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.4.1",
|
||||
"xen-api": "^0.27.3"
|
||||
"xen-api": "^0.27.1"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/cron",
|
||||
"version": "1.0.6",
|
||||
"version": "1.0.3",
|
||||
"license": "ISC",
|
||||
"description": "Focused, well maintained, cron parser/scheduler",
|
||||
"keywords": [
|
||||
@@ -46,8 +46,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -7,21 +7,7 @@ const MAX_DELAY = 2 ** 31 - 1
|
||||
|
||||
class Job {
|
||||
constructor(schedule, fn) {
|
||||
let scheduledDate
|
||||
const wrapper = () => {
|
||||
const now = Date.now()
|
||||
if (scheduledDate > now) {
|
||||
// we're early, delay
|
||||
//
|
||||
// no need to check _isEnabled, we're just delaying the existing timeout
|
||||
//
|
||||
// see https://github.com/vatesfr/xen-orchestra/issues/4625
|
||||
this._timeout = setTimeout(wrapper, scheduledDate - now)
|
||||
return
|
||||
}
|
||||
|
||||
this._isRunning = true
|
||||
|
||||
let result
|
||||
try {
|
||||
result = fn()
|
||||
@@ -36,36 +22,23 @@ class Job {
|
||||
}
|
||||
}
|
||||
const scheduleNext = () => {
|
||||
this._isRunning = false
|
||||
|
||||
if (this._isEnabled) {
|
||||
const now = schedule._createDate()
|
||||
scheduledDate = +next(schedule._schedule, now)
|
||||
const delay = scheduledDate - now
|
||||
this._timeout =
|
||||
delay < MAX_DELAY
|
||||
? setTimeout(wrapper, delay)
|
||||
: setTimeout(scheduleNext, MAX_DELAY)
|
||||
}
|
||||
const delay = schedule._nextDelay()
|
||||
this._timeout =
|
||||
delay < MAX_DELAY
|
||||
? setTimeout(wrapper, delay)
|
||||
: setTimeout(scheduleNext, MAX_DELAY)
|
||||
}
|
||||
|
||||
this._isEnabled = false
|
||||
this._isRunning = false
|
||||
this._scheduleNext = scheduleNext
|
||||
this._timeout = undefined
|
||||
}
|
||||
|
||||
start() {
|
||||
this.stop()
|
||||
|
||||
this._isEnabled = true
|
||||
if (!this._isRunning) {
|
||||
this._scheduleNext()
|
||||
}
|
||||
this._scheduleNext()
|
||||
}
|
||||
|
||||
stop() {
|
||||
this._isEnabled = false
|
||||
clearTimeout(this._timeout)
|
||||
}
|
||||
}
|
||||
@@ -95,6 +68,11 @@ class Schedule {
|
||||
return dates
|
||||
}
|
||||
|
||||
_nextDelay() {
|
||||
const now = this._createDate()
|
||||
return next(this._schedule, now) - now
|
||||
}
|
||||
|
||||
startJob(fn) {
|
||||
const job = this.createJob(fn)
|
||||
job.start()
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { createSchedule } from './'
|
||||
|
||||
const wrap = value => () => value
|
||||
|
||||
describe('issues', () => {
|
||||
let originalDateNow
|
||||
beforeAll(() => {
|
||||
originalDateNow = Date.now
|
||||
})
|
||||
afterAll(() => {
|
||||
Date.now = originalDateNow
|
||||
originalDateNow = undefined
|
||||
})
|
||||
|
||||
test('stop during async execution', async () => {
|
||||
let nCalls = 0
|
||||
let resolve, promise
|
||||
|
||||
const schedule = createSchedule('* * * * *')
|
||||
const job = schedule.createJob(() => {
|
||||
++nCalls
|
||||
|
||||
// eslint-disable-next-line promise/param-names
|
||||
promise = new Promise(r => {
|
||||
resolve = r
|
||||
})
|
||||
return promise
|
||||
})
|
||||
|
||||
job.start()
|
||||
Date.now = wrap(+schedule.next(1)[0])
|
||||
jest.runAllTimers()
|
||||
|
||||
expect(nCalls).toBe(1)
|
||||
|
||||
job.stop()
|
||||
|
||||
resolve()
|
||||
await promise
|
||||
|
||||
jest.runAllTimers()
|
||||
expect(nCalls).toBe(1)
|
||||
})
|
||||
|
||||
test('stop then start during async job execution', async () => {
|
||||
let nCalls = 0
|
||||
let resolve, promise
|
||||
|
||||
const schedule = createSchedule('* * * * *')
|
||||
const job = schedule.createJob(() => {
|
||||
++nCalls
|
||||
|
||||
// eslint-disable-next-line promise/param-names
|
||||
promise = new Promise(r => {
|
||||
resolve = r
|
||||
})
|
||||
return promise
|
||||
})
|
||||
|
||||
job.start()
|
||||
Date.now = wrap(+schedule.next(1)[0])
|
||||
jest.runAllTimers()
|
||||
|
||||
expect(nCalls).toBe(1)
|
||||
|
||||
job.stop()
|
||||
job.start()
|
||||
|
||||
resolve()
|
||||
await promise
|
||||
|
||||
Date.now = wrap(+schedule.next(1)[0])
|
||||
jest.runAllTimers()
|
||||
expect(nCalls).toBe(2)
|
||||
})
|
||||
})
|
||||
@@ -1,13 +1,13 @@
|
||||
# @xen-orchestra/defined [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/defined):
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save @xen-orchestra/defined
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
@@ -40,10 +40,10 @@ the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](https://vates.fr)
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
|
||||
@@ -34,8 +34,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -62,10 +62,10 @@ the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](https://vates.fr)
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
|
||||
@@ -33,8 +33,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "0.10.2",
|
||||
"version": "0.10.1",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"keywords": [],
|
||||
@@ -18,19 +18,19 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@marsaud/smb2": "^0.14.0",
|
||||
"@sindresorhus/df": "^3.1.1",
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"decorator-synchronized": "^0.5.0",
|
||||
"execa": "^3.2.0",
|
||||
"execa": "^1.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^5.1.0",
|
||||
"get-stream": "^4.0.0",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.1.0",
|
||||
@@ -46,10 +46,10 @@
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"dotenv": "^8.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -389,7 +389,7 @@ export default class RemoteHandlerAbstract {
|
||||
async test(): Promise<Object> {
|
||||
const SIZE = 1024 * 1024 * 10
|
||||
const testFileName = normalizePath(`${Date.now()}.test`)
|
||||
const data = await fromCallback(randomBytes, SIZE)
|
||||
const data = await fromCallback(cb => randomBytes(SIZE, cb))
|
||||
let step = 'write'
|
||||
try {
|
||||
const writeStart = process.hrtime()
|
||||
|
||||
@@ -86,7 +86,7 @@ handlers.forEach(url => {
|
||||
describe('#createOutputStream()', () => {
|
||||
it('creates parent dir if missing', async () => {
|
||||
const stream = await handler.createOutputStream('dir/file')
|
||||
await fromCallback(pipeline, createTestDataStream(), stream)
|
||||
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
|
||||
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
|
||||
})
|
||||
})
|
||||
@@ -106,7 +106,7 @@ handlers.forEach(url => {
|
||||
describe('#createWriteStream()', () => {
|
||||
testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
|
||||
const stream = await handler.createWriteStream(file, { flags })
|
||||
await fromCallback(pipeline, createTestDataStream(), stream)
|
||||
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
|
||||
await expect(await handler.readFile('file')).toEqual(TEST_DATA)
|
||||
})
|
||||
|
||||
|
||||
@@ -47,19 +47,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {
|
||||
})
|
||||
}
|
||||
|
||||
async _getInfo() {
|
||||
// df.file() resolves with an object with the following properties:
|
||||
// filesystem, type, size, used, available, capacity and mountpoint.
|
||||
// size, used, available and capacity may be `NaN` so we remove any `NaN`
|
||||
// value from the object.
|
||||
const info = await df.file(this._getFilePath('/'))
|
||||
Object.keys(info).forEach(key => {
|
||||
if (Number.isNaN(info[key])) {
|
||||
delete info[key]
|
||||
}
|
||||
})
|
||||
|
||||
return info
|
||||
_getInfo() {
|
||||
return df.file(this._getFilePath('/'))
|
||||
}
|
||||
|
||||
async _getSize(file) {
|
||||
|
||||
@@ -15,7 +15,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/log):
|
||||
Everywhere something should be logged:
|
||||
|
||||
```js
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
|
||||
const log = createLogger('my-module')
|
||||
|
||||
@@ -42,7 +42,6 @@ log.error('could not join server', {
|
||||
Then, at application level, configure the logs are handled:
|
||||
|
||||
```js
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { configure, catchGlobalErrors } from '@xen-orchestra/log/configure'
|
||||
import transportConsole from '@xen-orchestra/log/transports/console'
|
||||
import transportEmail from '@xen-orchestra/log/transports/email'
|
||||
@@ -78,8 +77,8 @@ configure([
|
||||
])
|
||||
|
||||
// send all global errors (uncaught exceptions, warnings, unhandled rejections)
|
||||
// to this logger
|
||||
catchGlobalErrors(createLogger('app'))
|
||||
// to this transport
|
||||
catchGlobalErrors(transport)
|
||||
```
|
||||
|
||||
### Transports
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/log",
|
||||
"version": "0.2.0",
|
||||
"version": "0.1.4",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
@@ -31,16 +31,16 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.14.0"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
@@ -48,7 +48,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,8 +19,7 @@ const createTransport = config => {
|
||||
}
|
||||
}
|
||||
|
||||
let { filter } = config
|
||||
let transport = createTransport(config.transport)
|
||||
let { filter, transport } = config
|
||||
const level = resolve(config.level)
|
||||
|
||||
if (filter !== undefined) {
|
||||
@@ -52,12 +51,11 @@ const symbol =
|
||||
? Symbol.for('@xen-orchestra/log')
|
||||
: '@@@xen-orchestra/log'
|
||||
|
||||
const { env } = process
|
||||
global[symbol] = createTransport({
|
||||
// display warnings or above, and all that are enabled via DEBUG or
|
||||
// NODE_DEBUG env
|
||||
filter: [env.DEBUG, env.NODE_DEBUG].filter(Boolean).join(','),
|
||||
level: resolve(env.LOG_LEVEL, LEVELS.INFO),
|
||||
filter: process.env.DEBUG || process.env.NODE_DEBUG,
|
||||
level: LEVELS.INFO,
|
||||
|
||||
transport: createConsoleTransport(),
|
||||
})
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import createTransport from './transports/console'
|
||||
import LEVELS, { resolve } from './levels'
|
||||
import LEVELS from './levels'
|
||||
|
||||
const symbol =
|
||||
typeof Symbol !== 'undefined'
|
||||
@@ -9,8 +9,7 @@ if (!(symbol in global)) {
|
||||
// the default behavior, without requiring `configure` is to avoid
|
||||
// logging anything unless it's a real error
|
||||
const transport = createTransport()
|
||||
const level = resolve(process.env.LOG_LEVEL, LEVELS.WARN)
|
||||
global[symbol] = log => log.level >= level && transport(log)
|
||||
global[symbol] = log => log.level > LEVELS.WARN && transport(log)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
@@ -73,5 +72,5 @@ prototype.wrap = function(message, fn) {
|
||||
}
|
||||
}
|
||||
|
||||
export const createLogger = namespace => new Logger(namespace)
|
||||
const createLogger = namespace => new Logger(namespace)
|
||||
export { createLogger as default }
|
||||
|
||||
@@ -13,22 +13,11 @@ for (const name in LEVELS) {
|
||||
NAMES[LEVELS[name]] = name
|
||||
}
|
||||
|
||||
// resolves to the number representation of a level
|
||||
//
|
||||
// returns `defaultLevel` if invalid
|
||||
export const resolve = (level, defaultLevel) => {
|
||||
const type = typeof level
|
||||
if (type === 'number') {
|
||||
if (level in NAMES) {
|
||||
return level
|
||||
}
|
||||
} else if (type === 'string') {
|
||||
const nLevel = LEVELS[level.toUpperCase()]
|
||||
if (nLevel !== undefined) {
|
||||
return nLevel
|
||||
}
|
||||
export const resolve = level => {
|
||||
if (typeof level === 'string') {
|
||||
level = LEVELS[level.toUpperCase()]
|
||||
}
|
||||
return defaultLevel
|
||||
return level
|
||||
}
|
||||
|
||||
Object.freeze(LEVELS)
|
||||
|
||||
@@ -1,23 +1,26 @@
|
||||
import LEVELS, { NAMES } from '../levels'
|
||||
|
||||
// Bind console methods (necessary for browsers)
|
||||
/* eslint-disable no-console */
|
||||
const debugConsole = console.log.bind(console)
|
||||
const infoConsole = console.info.bind(console)
|
||||
const warnConsole = console.warn.bind(console)
|
||||
const errorConsole = console.error.bind(console)
|
||||
/* eslint-enable no-console */
|
||||
|
||||
const { ERROR, INFO, WARN } = LEVELS
|
||||
|
||||
const consoleTransport = ({ data, level, namespace, message, time }) => {
|
||||
const fn =
|
||||
/* eslint-disable no-console */
|
||||
level < INFO
|
||||
? console.log
|
||||
? debugConsole
|
||||
: level < WARN
|
||||
? console.info
|
||||
? infoConsole
|
||||
: level < ERROR
|
||||
? console.warn
|
||||
: console.error
|
||||
/* eslint-enable no-console */
|
||||
? warnConsole
|
||||
: errorConsole
|
||||
|
||||
const args = [time.toISOString(), namespace, NAMES[level], message]
|
||||
if (data != null) {
|
||||
args.push(data)
|
||||
}
|
||||
fn.apply(console, args)
|
||||
fn('%s - %s - [%s] %s', time.toISOString(), namespace, NAMES[level], message)
|
||||
data != null && fn(data)
|
||||
}
|
||||
export default () => consoleTransport
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
# @xen-orchestra/mixin [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/mixin):
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save @xen-orchestra/mixin
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
@@ -40,10 +40,10 @@ the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](https://vates.fr)
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-dev": "^1.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
# @xen-orchestra/template [](https://travis-ci.org/vatesfr/xen-orchestra)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/template):
|
||||
|
||||
```
|
||||
> npm install --save @xen-orchestra/template
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Create a string replacer based on a pattern and a list of rules.
|
||||
|
||||
```js
|
||||
const myReplacer = compileTemplate('{name}_COPY_\{name}_{id}_%\%', {
|
||||
'{name}': vm => vm.name_label,
|
||||
'{id}': vm => vm.id,
|
||||
'%': (_, i) => i
|
||||
})
|
||||
|
||||
const newString = myReplacer({
|
||||
name_label: 'foo',
|
||||
id: 42,
|
||||
}, 32)
|
||||
|
||||
newString === 'foo_COPY_{name}_42_32%' // true
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
# Install dependencies
|
||||
> yarn
|
||||
|
||||
# Run the tests
|
||||
> yarn test
|
||||
|
||||
# Continuously compile
|
||||
> yarn dev
|
||||
|
||||
# Continuously run the tests
|
||||
> yarn dev-test
|
||||
|
||||
# Build for production (automatically called by npm install)
|
||||
> yarn build
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are *very* welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
ISC © [Vates SAS](https://vates.fr)
|
||||
@@ -1,46 +0,0 @@
|
||||
{
|
||||
"name": "@xen-orchestra/template",
|
||||
"version": "0.1.0",
|
||||
"license": "ISC",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/template",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/template",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.15"
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
import escapeRegExp from 'lodash/escapeRegExp'
|
||||
|
||||
const compareLengthDesc = (a, b) => b.length - a.length
|
||||
|
||||
export function compileTemplate(pattern, rules) {
|
||||
const matches = Object.keys(rules)
|
||||
.sort(compareLengthDesc)
|
||||
.map(escapeRegExp)
|
||||
.join('|')
|
||||
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
|
||||
return (...params) =>
|
||||
pattern.replace(regExp, match => {
|
||||
if (match[0] === '\\') {
|
||||
return match.slice(1)
|
||||
}
|
||||
const rule = rules[match]
|
||||
return typeof rule === 'function' ? rule(...params) : rule
|
||||
})
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
import { compileTemplate } from '.'
|
||||
|
||||
it("correctly replaces the template's variables", () => {
|
||||
const replacer = compileTemplate(
|
||||
'{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
|
||||
{
|
||||
'{property}': obj => obj.name,
|
||||
'{constant}': 1235,
|
||||
'%': (_, i) => i,
|
||||
}
|
||||
)
|
||||
expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
|
||||
})
|
||||
211
CHANGELOG.md
211
CHANGELOG.md
@@ -4,218 +4,17 @@
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
|
||||
- [SAML] Setting to disable requested authentication context (helps with _Active Directory_) (PR [#4675](https://github.com/vatesfr/xen-orchestra/pull/4675))
|
||||
- The default sign-in page can be configured via `authentication.defaultSignInPage` (PR [#4678](https://github.com/vatesfr/xen-orchestra/pull/4678))
|
||||
- [SR] Allow import of VHD and VMDK disks [#4137](https://github.com/vatesfr/xen-orchestra/issues/4137) (PR [#4138](https://github.com/vatesfr/xen-orchestra/pull/4138) )
|
||||
- [Host] Advanced Live Telemetry (PR [#4680](https://github.com/vatesfr/xen-orchestra/pull/4680))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Metadata backup] Add 10 minutes timeout to avoid stuck jobs [#4657](https://github.com/vatesfr/xen-orchestra/issues/4657) (PR [#4666](https://github.com/vatesfr/xen-orchestra/pull/4666))
|
||||
- [Metadata backups] Fix out-of-date listing for 1 minute due to cache (PR [#4672](https://github.com/vatesfr/xen-orchestra/pull/4672))
|
||||
- [Delta backup] Limit the number of merged deltas per run to avoid interrupted jobs (PR [#4674](https://github.com/vatesfr/xen-orchestra/pull/4674))
|
||||
|
||||
### Released packages
|
||||
|
||||
- vhd-lib v0.7.2
|
||||
- xo-vmdk-to-vhd v0.1.8
|
||||
- xo-server-auth-ldap v0.6.6
|
||||
- xo-server-auth-saml v0.7.0
|
||||
- xo-server-backup-reports v0.16.4
|
||||
- @xen-orchestra/fs v0.10.2
|
||||
- xo-server v5.53.0
|
||||
- xo-web v5.53.1
|
||||
|
||||
## **5.40.2** (2019-11-22)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Logs] Ability to report a bug with attached log (PR [#4201](https://github.com/vatesfr/xen-orchestra/pull/4201))
|
||||
- [Backup] Reduce _VDI chain protection error_ occurrence by being more tolerant (configurable via `xo-server`'s `xapiOptions.maxUncoalescedVdis` setting) [#4124](https://github.com/vatesfr/xen-orchestra/issues/4124) (PR [#4651](https://github.com/vatesfr/xen-orchestra/pull/4651))
|
||||
- [Plugin] [Web hooks](https://xen-orchestra.com/docs/web-hooks.html) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
|
||||
- [Tables] Always put the tables' search in the URL [#4542](https://github.com/vatesfr/xen-orchestra/issues/4542) (PR [#4637](https://github.com/vatesfr/xen-orchestra/pull/4637))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SDN controller] Prevent private network creation on bond slave PIF (Fixes https://github.com/xcp-ng/xcp/issues/300) (PR [4633](https://github.com/vatesfr/xen-orchestra/pull/4633))
|
||||
- [Metadata backup] Fix failed backup reported as successful [#4596](https://github.com/vatesfr/xen-orchestra/issues/4596) (PR [#4598](https://github.com/vatesfr/xen-orchestra/pull/4598))
|
||||
- [Backup NG] Fix "task cancelled" error when the backup job timeout exceeds 596 hours [#4662](https://github.com/vatesfr/xen-orchestra/issues/4662) (PR [#4663](https://github.com/vatesfr/xen-orchestra/pull/4663))
|
||||
- Fix `promise rejected with non-error` warnings in logs (PR [#4659](https://github.com/vatesfr/xen-orchestra/pull/4659))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-web-hooks v0.1.0
|
||||
- xen-api v0.27.3
|
||||
- xo-server-backup-reports v0.16.3
|
||||
- vhd-lib v0.7.1
|
||||
- xo-server v5.52.1
|
||||
- xo-web v5.52.0
|
||||
|
||||
## **5.40.1** (2019-10-29)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [XOSAN] Fix "Install Cloud plugin" warning (PR [#4631](https://github.com/vatesfr/xen-orchestra/pull/4631))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-web v5.51.1
|
||||
|
||||
## **5.40.0** (2019-10-29)
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `xo-server` requires Node 8.
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
|
||||
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
|
||||
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
|
||||
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
|
||||
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
|
||||
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
|
||||
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
|
||||
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/cron v1.0.6
|
||||
- xo-server-transport-icinga2 v0.1.0
|
||||
- xo-server-sdn-controller v0.3.1
|
||||
- xo-server v5.51.1
|
||||
- xo-web v5.51.0
|
||||
|
||||
### Dropped packages
|
||||
|
||||
- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment
|
||||
|
||||
|
||||
## **5.39.1** (2019-10-11)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
|
||||
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-web v5.50.3
|
||||
|
||||
|
||||
## **5.39.0** (2019-09-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
|
||||
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
|
||||
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))
|
||||
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SR/new] Clarify address formats [#4450](https://github.com/vatesfr/xen-orchestra/issues/4450) (PR [#4460](https://github.com/vatesfr/xen-orchestra/pull/4460))
|
||||
- [Backup NG/New] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4411](https://github.com/vatesfr/xen-orchestra/pull/4411))
|
||||
- [VM/disks] Don't hide disks that are attached to the same VM twice [#4400](https://github.com/vatesfr/xen-orchestra/issues/4400) (PR [#4414](https://github.com/vatesfr/xen-orchestra/pull/4414))
|
||||
- [SDN Controller] Ability to configure MTU for private networks (PR [#4491](https://github.com/vatesfr/xen-orchestra/pull/4491))
|
||||
- [VM Export] Filenames are now prefixed with datetime [#4503](https://github.com/vatesfr/xen-orchestra/issues/4503)
|
||||
- [Settings/Logs] Differenciate XS/XCP-ng errors from XO errors [#4101](https://github.com/vatesfr/xen-orchestra/issues/4101) (PR [#4385](https://github.com/vatesfr/xen-orchestra/pull/4385))
|
||||
- [Backups] Improve performance by caching logs consolidation (PR [#4541](https://github.com/vatesfr/xen-orchestra/pull/4541))
|
||||
- [New VM] Cloud Init available for all plans (PR [#4543](https://github.com/vatesfr/xen-orchestra/pull/4543))
|
||||
- [Servers] IPv6 addresses can be used [#4520](https://github.com/vatesfr/xen-orchestra/issues/4520) (PR [#4521](https://github.com/vatesfr/xen-orchestra/pull/4521)) \
|
||||
Note: They must enclosed in brackets to differentiate with the port, e.g.: `[2001:db8::7334]` or `[ 2001:db8::7334]:4343`
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [PBD] Obfuscate cifs password from device config [#4384](https://github.com/vatesfr/xen-orchestra/issues/4384) (PR [#4401](https://github.com/vatesfr/xen-orchestra/pull/4401))
|
||||
- [XOSAN] Fix "invalid parameters" error on creating a SR (PR [#4478](https://github.com/vatesfr/xen-orchestra/pull/4478))
|
||||
- [Patching] Avoid overloading XCP-ng by reducing the frequency of yum update checks [#4358](https://github.com/vatesfr/xen-orchestra/issues/4358) (PR [#4477](https://github.com/vatesfr/xen-orchestra/pull/4477))
|
||||
- [Network] Fix inability to create a bonded network (PR [#4489](https://github.com/vatesfr/xen-orchestra/pull/4489))
|
||||
- [Backup restore & Replication] Don't copy `sm_config` to new VDIs which might leads to useless coalesces [#4482](https://github.com/vatesfr/xen-orchestra/issues/4482) (PR [#4484](https://github.com/vatesfr/xen-orchestra/pull/4484))
|
||||
- [Home] Fix intermediary "no results" display showed on filtering items [#4420](https://github.com/vatesfr/xen-orchestra/issues/4420) (PR [#4456](https://github.com/vatesfr/xen-orchestra/pull/4456)
|
||||
- [Backup NG/New schedule] Properly show user errors in the form [#3831](https://github.com/vatesfr/xen-orchestra/issues/3831) (PR [#4131](https://github.com/vatesfr/xen-orchestra/pull/4131))
|
||||
- [VM/Advanced] Fix `"vm.set_domain_type" is not a function` error on switching virtualization mode (PV/HVM) [#4348](https://github.com/vatesfr/xen-orchestra/issues/4348) (PR [#4504](https://github.com/vatesfr/xen-orchestra/pull/4504))
|
||||
- [Backup NG/logs] Show warning when zstd compression is selected but not supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4375](https://github.com/vatesfr/xen-orchestra/pull/4375)
|
||||
- [Patches] Fix patches installation for CH 8.0 (PR [#4511](https://github.com/vatesfr/xen-orchestra/pull/4511))
|
||||
- [Network] Fix inability to set a network name [#4514](https://github.com/vatesfr/xen-orchestra/issues/4514) (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
|
||||
- [Backup NG] Fix race conditions that could lead to disabled jobs still running (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
|
||||
- [XOA] Remove "Updates" and "Licenses" tabs for non admin users (PR [#4526](https://github.com/vatesfr/xen-orchestra/pull/4526))
|
||||
- [New VM] Ability to escape [cloud config template](https://xen-orchestra.com/blog/xen-orchestra-5-21/#cloudconfigtemplates) variables [#4486](https://github.com/vatesfr/xen-orchestra/issues/4486) (PR [#4501](https://github.com/vatesfr/xen-orchestra/pull/4501))
|
||||
- [Backup NG] Properly log and report if job is already running [#4497](https://github.com/vatesfr/xen-orchestra/issues/4497) (PR [4534](https://github.com/vatesfr/xen-orchestra/pull/4534))
|
||||
- [Host] Fix an issue where host was wrongly reporting time inconsistency (PR [#4540](https://github.com/vatesfr/xen-orchestra/pull/4540))
|
||||
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.27.2
|
||||
- xo-server-cloud v0.3.0
|
||||
- @xen-orchestra/cron v1.0.4
|
||||
- xo-server-sdn-controller v0.3.0
|
||||
- @xen-orchestra/template v0.1.0
|
||||
- xo-server v5.50.1
|
||||
- xo-web v5.50.2
|
||||
|
||||
|
||||
## **5.38.0** (2019-08-29)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
|
||||
- [Zstd]
|
||||
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
|
||||
- [VM/Bulk copy] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4346](https://github.com/vatesfr/xen-orchestra/pull/4346))
|
||||
- [VM import & Continuous Replication] Enable `guessVhdSizeOnImport` by default, this fix some `VDI_IO_ERROR` with XenServer 7.1 and XCP-ng 8.0 (PR [#4436](https://github.com/vatesfr/xen-orchestra/pull/4436))
|
||||
- [SDN Controller] Add possibility to create multiple GRE networks and VxLAN networks within a same pool (PR [#4435](https://github.com/vatesfr/xen-orchestra/pull/4435))
|
||||
- [SDN Controller] Add possibility to create cross-pool private networks (PR [#4405](https://github.com/vatesfr/xen-orchestra/pull/4405))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
|
||||
- [VM/Attach disk] Fix checking VDI mode (PR [#4373](https://github.com/vatesfr/xen-orchestra/pull/4373))
|
||||
- [VM revert] Snapshot before: add admin ACLs on created snapshot [#4331](https://github.com/vatesfr/xen-orchestra/issues/4331) (PR [#4391](https://github.com/vatesfr/xen-orchestra/pull/4391))
|
||||
- [Network] Fixed "invalid parameters" error when creating bonded network [#4425](https://github.com/vatesfr/xen-orchestra/issues/4425) (PR [#4429](https://github.com/vatesfr/xen-orchestra/pull/4429))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller v0.2.0
|
||||
- xo-server-usage-report v0.7.3
|
||||
- xo-server v5.48.0
|
||||
- xo-web v5.48.1
|
||||
|
||||
## **5.37.1** (2019-08-06)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
|
||||
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller v0.1.2
|
||||
- xo-server v5.47.0
|
||||
- xo-web v5.47.0
|
||||
|
||||
## **5.37.0** (2019-07-25)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
|
||||
@@ -256,6 +55,8 @@
|
||||
|
||||
## **5.36.0** (2019-06-27)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
|
||||
|
||||
@@ -7,12 +7,16 @@
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
|
||||
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [Host] Fix Enable Live Telemetry button state (PR [#4686](https://github.com/vatesfr/xen-orchestra/pull/4686))
|
||||
- [Host] Fix Advanced Live Telemetry URL (PR [#4687](https://github.com/vatesfr/xen-orchestra/pull/4687))
|
||||
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
|
||||
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
|
||||
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
|
||||
|
||||
### Released packages
|
||||
|
||||
@@ -21,5 +25,7 @@
|
||||
>
|
||||
> Rule of thumb: add packages on top.
|
||||
|
||||
- xo-server v5.54.0
|
||||
- xo-web v5.54.0
|
||||
- xo-server-usage-report v0.7.3
|
||||
- xo-server-sdn-controller v0.1.2
|
||||
- xo-server v5.47.0
|
||||
- xo-web v5.47.0
|
||||
|
||||
@@ -1,19 +1,14 @@
### Check list

> Check if done.
>
> Strikethrough if not relevant: ~~example~~ ([doc](https://help.github.com/en/articles/basic-writing-and-formatting-syntax)).
> Check items when done or if not relevant

- [ ] PR reference the relevant issue (e.g. `Fixes #007` or `See xoa-support#42`)
- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] `CHANGELOG.unreleased.md`:
  - enhancement/bug fix entry added
  - list of packages to release updated (`${name} v${new version}`)
- [ ] documentation updated
- `CHANGELOG.unreleased.md`:
  - [ ] enhancement/bug fix entry added
  - [ ] list of packages to release updated (`${name} v${new version}`)
- **I have tested added/updated features** (and impacted code)
  - [ ] unit tests (e.g. [`cron/parse.spec.js`](https://github.com/vatesfr/xen-orchestra/blob/b24400b21de1ebafa1099c56bac1de5c988d9202/%40xen-orchestra/cron/src/parse.spec.js))
  - [ ] if `xo-server` API changes, the corresponding test has been added to/updated on [`xo-server-test`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test)
  - [ ] at least manual testing
- [ ] **I have tested added/updated features** (and impacted code)

### Process

@@ -21,10 +16,3 @@
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer (and only one)
1. if necessary, update your PR, and re-add a reviewer

From [_the Four Agreements_](https://en.wikipedia.org/wiki/Don_Miguel_Ruiz#The_Four_Agreements):

1. Be impeccable with your word.
1. Don't take anything personally.
1. Don't make assumptions.
1. Always do your best.

@@ -13,11 +13,11 @@ It aims to be easy to use on any device supporting modern web technologies (HTML

## XOA quick deploy

Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
SSH to your XenServer, and execute the following:

> **Note:** no data will be sent to our servers, it's running only between your browser and your host!

[![](https://xen-orchestra.com/assets/deploy-form.png)](https://xen-orchestra.com/#!/xoa)
```
bash -c "$(curl -s http://xoa.io/deploy)"
```

### XOA credentials

@@ -51,12 +51,10 @@
* [Health](health.md)
* [Job manager](scheduler.md)
* [Alerts](alerts.md)
* [Web hooks](web-hooks.md)
* [Load balancing](load_balancing.md)
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)
* [Forecaster](forecaster.md)
* [SDN Controller](sdn_controller.md)
* [Recipes](recipes.md)
* [Reverse proxy](reverse_proxy.md)
* [How to contribute?](contributing.md)
@@ -22,7 +22,7 @@ group = 'nogroup'
By default, XO-server listens on all addresses (0.0.0.0) and runs on port 80. If you need to, you can change this in the `# Basic HTTP` section:

```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 80
```

@@ -31,7 +31,7 @@ port = 80
XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:

```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 443
certificate = './certificate.pem'
key = './key.pem'
@@ -43,10 +43,10 @@ key = './key.pem'

If you want to redirect everything to HTTPS, you can modify the configuration like this:

```toml
```
# If set to true, all HTTP traffic will be redirected to the first HTTPs configuration.

redirectToHttps = true
redirectToHttps: true
```

This should be written just before the `mount` option, inside the `http:` block.

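As a purely illustrative sketch of that placement (the `[http]` table name is an assumption based on the sample config, and the mount path is the example value used later on this page; adjust both to your install):

```toml
[http]
# If set to true, all HTTP traffic will be redirected to the first HTTPS configuration.
redirectToHttps = true

[http.mounts]
'/' = '../xo-web/dist/'
```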
@@ -15,6 +15,5 @@ We've made multiple categories to help you to find what you need:
* [Job Manager](scheduler.html)
* [Alerts](alerts.html)
* [Load balancing](load_balancing.html)
* [SDN Controller](sdn_controller.html)

![](./assets/xo5arch.png)

@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:

```
$ node -v
v8.16.2
v8.12.0
```

If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.
@@ -65,13 +65,17 @@ Now you have to create a config file for `xo-server`:

```
$ cd packages/xo-server
$ mkdir -p ~/.config/xo-server
$ cp sample.config.toml ~/.config/xo-server/config.toml
$ cp sample.config.toml .xo-server.toml
```

> Note: If you're installing `xo-server` as a global service, you may want to copy the file to `/etc/xo-server/config.toml` instead.
Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory):

In this config file, you can change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
```toml
[http.mounts]
'/' = '../xo-web/dist/'
```

In this config file, you can also change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
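As a minimal sketch of such a change (the port value is arbitrary; the `port` key sits in the same `# Basic HTTP` section shown earlier):

```toml
# listen on an unprivileged port (>= 1024) so xo-server can run as a non-root user
port = 8080
```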

You can try to start xo-server to see if it works. You should have something like this:

@@ -182,7 +186,7 @@ service redis start

## SUDO

If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing the `xo-server` configuration file and setting `useSudo = true`. It's near the end of the file:
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server/.xo-server.toml` and setting `useSudo = true`. It's near the end of the file:

```
useSudo = true

@@ -1,10 +1,14 @@

# Installation

Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
SSH to your XenServer/XCP-ng host and execute the following:

![](./assets/deploy_form.png)
```
bash -c "$(curl -s http://xoa.io/deploy)"
```

## [More on XOA and alternate deploy](xoa.md)
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.

## [More on XOA](xoa.md)

![](./assets/xo5updater.png)

@@ -1,60 +0,0 @@
# SDN Controller

> SDN Controller is available in XOA 5.44 and higher

The SDN Controller enables a user to **create pool-wide and cross-pool** (since XOA 5.48.1) **private networks**.

![](./assets/sdn-controller.png)

## How does it work?

Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.com/blog/xo-sdn-controller/) and its [extension for cross-pool private networks](https://xen-orchestra.com/blog/devblog-3-extending-the-sdn-controller/).

## Usage

### Network creation

In the network creation view:
- Select a `pool`
- Select `Private network`
- Select an interface on which to create the network's tunnels
- Select the encapsulation: a choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, then port 4789 must be open for UDP traffic on all the network's hosts (see [the requirements](#vxlan))
- Choose if the network should be encrypted or not (see [the requirements](#encryption) to use encryption)
- Select other `pool`s to add them to the network if desired
- For each added `pool`: select an interface on which to create the tunnels
- Create the network
- Have fun! ☺

***NB:***
- All hosts in a private network must be able to reach the other hosts' management interface.
  > The term ‘management interface’ is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to an Open vSwitch limitation.

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

The plugin's configuration contains the following settings (a sketch follows this list):
- `cert-dir`: The path where the plugin will look for the certificates to create SSL connections with the hosts.
  If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Enable to uninstall the existing SDN controller CA certificate in order to replace it with the plugin's one.
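For illustration only, such a configuration could look like the following sketch (the format and the directory path are assumptions; only the two key names above are documented):

```json
{
  "cert-dir": "/etc/xo-server/sdn-controller/ssl",
  "override-certs": false
}
```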

## Requirements

### VxLAN

- On XCP-ng prior to 7.6:
  - To be able to use `VxLAN`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`

### Encryption

> Encryption is not available prior to 8.0.

- On XCP-ng 8.0:
  - To be able to encrypt the networks, the `openvswitch-ipsec` package must be installed on all the hosts:
    - `yum install openvswitch-ipsec --enablerepo=xcp-ng-testing`
    - `systemctl enable ipsec`
    - `systemctl enable openvswitch-ipsec`
    - `systemctl start ipsec`
    - `systemctl start openvswitch-ipsec`
@@ -110,17 +110,16 @@ $ systemctl restart xo-server

### Behind a transparent proxy

If you're behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
If your are behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).

Run the following commands to allow the updater to work:
First, run the following commands:

```
$ sudo -s
$ echo NODE_TLS_REJECT_UNAUTHORIZED=0 >> /etc/xo-appliance/env
$ npm config -g set strict-ssl=false
$ systemctl restart xoa-updater
```
Now try running an update again.

Then, restart the updater with `systemctl restart xoa-updater`.

### Updating SSL self-signed certificate

@@ -41,20 +41,6 @@ However, if you want to start a manual check, you can do it by clicking on the "

![](./assets/xo5updatebutton.png)

#### Release channel
In Xen Orchestra, you can make a choice between two different release channels.

##### Stable
The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).

##### Latest

The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once deployed in vastly varying environments, such as our user base has.

> To select the release channel of your choice, go to the XOA > Updates view.

![](./assets/xo5releasechannels.png)

#### Upgrade

If a new version is found, you'll have an upgrade button and its tooltip displayed:

@@ -1,72 +0,0 @@
# Web hooks

⚠ This feature is experimental!

## Configuration

The plugin "web-hooks" needs to be installed and loaded for this feature to work.

You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called.

* Go to Settings > Plugins > Web hooks
* Add new hooks
* For each hook, configure:
  * Method: the XO API method that will trigger the HTTP request when called
  * Type:
    * pre: the request will be sent when the method is called
    * post: the request will be sent after the method action is completed
    * pre/post: both
  * URL: the full URL which the requests will be sent to
* Save the plugin configuration

From now on, a request will be sent to the corresponding URLs when a configured method is called by an XO client.

## Request content

```
POST / HTTP/1.1
Content-Type: application/json
```

The request's body is a JSON string representing an object with the following properties (an illustrative payload is shown after the list):

- `type`: `"pre"` or `"post"`
- `callId`: unique ID for this call to help match a pre-call and a post-call
- `userId`: unique internal ID of the user who performed the call
- `userName`: login/e-mail address of the user who performed the call
- `method`: name of the method that was called (e.g. `"vm.start"`)
- `params`: call parameters (object)
- `timestamp`: epoch timestamp of the beginning ("pre") or end ("post") of the call in ms
- `duration`: duration of the call in ms ("post" hooks only)
- `result`: call result on success ("post" hooks only)
- `error`: call result on error ("post" hooks only)
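For illustration only, here is a hypothetical `post` payload for a successful `vm.start` call; all values, IDs and the parameter name are made up, only the property names come from the list above:

```json
{
  "type": "post",
  "callId": "5c2f3a1b-example",
  "userId": "b7569d99-example",
  "userName": "admin@example.net",
  "method": "vm.start",
  "params": { "id": "d3adb33f-example" },
  "timestamp": 1573731200000,
  "duration": 1280,
  "result": true
}
```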

## Request handling

*Quick Node.js example of how you may want to handle the requests*

```js
const http = require('http')
const { exec } = require('child_process')

http
  .createServer((req, res) => {
    let body = ''
    req.on('data', chunk => {
      body += chunk
    })
    req.on('end', () => handleHook(body))
    res.end()
  })
  .listen(3000)

const handleHook = data => {
  const { method, params, type, result, error, timestamp } = JSON.parse(data)

  // Log it
  console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params} → ${result || error}`)

  // Run scripts
  exec(`./hook-scripts/${method}-${type}.sh`)
}
```
66
docs/xoa.md
@@ -22,36 +22,26 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R

### The quickest way

The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go to https://xen-orchestra.com/#!/xoa and follow the instructions.

> **Note:** no data will be sent to our servers, the deployment only runs between your browser and your host!

![](./assets/deploy_form.png)

### Via a bash script

Alternatively, you can deploy it by connecting to your XenServer host and executing the following:
The fastest way to install Xen Orchestra is to use our appliance deploy script. You can deploy it by connecting to your XenServer host and executing the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```
**Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

> **Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

Follow the instructions:
Now follow the instructions:

* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS server should be provided.
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.

### Via a manual XVA download
### The alternative

You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.

After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
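Putting those two steps together on the host CLI (the file name is the example used above):

```
xe vm-import filename=xoa_unified.xva
xe vm-start vm="XOA"
```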

## First Login

Once you have started the VM, you can access the web UI by putting the IP you configured during deployment into your web browser. If you did not configure an IP or are unsure, try one of the following methods to find it:

* Run `xe vm-list params=name-label,networks | grep -A 1 XOA` on your host
@@ -64,35 +54,6 @@ Once you have started the VM, you can access the web UI by putting the IP you co

**The first thing** you need to do with your XOA is register. [Read the documentation on the page dedicated to the updater/register interface](updater.md).

## Technical Support

In your appliance, you can access the support section in the XOA menu. In this section you can:

* launch an `xoa check` command

![](./assets/xoa-support-menu.png)

* Open a secure support tunnel so our team can remotely investigate

![](./assets/xoa-support.png)

<a id="ssh-pro-support"></a>

If your web UI is not working, you can also open the secure support tunnel from the CLI. To open a private tunnel (we are the only one with the private key), you can use the command `xoa support tunnel` like below:

```
$ xoa support tunnel
The support tunnel has been created.

Do not stop this command before the intervention is over!
Give this id to the support: 40713
```

Give us this number, and we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.

> The tunnel utilizes the user `xoa-support`. If you want to deactivate this bundled user, you can run `chage -E 0 xoa-support`. To re-activate this account, you must run `chage -E 1 xoa-support`.

### First console connection

If you connect via SSH or console, the default credentials are:
@@ -185,6 +146,21 @@ You can access the VM console through XenCenter or using VNC through a SSH tunne

If you want to go back in DHCP, just run `xoa network dhcp`

### SSH Pro Support

By default, if you need support, there is a dedicated user named `xoa-support`. We are the only one with the private key. If you want our assistance on your XOA, you can open a private tunnel with the command `xoa support tunnel` like below:

```
$ xoa support tunnel
The support tunnel has been created.

Do not stop this command before the intervention is over!
Give this id to the support: 40713
```

Give us this number, we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.

> If you want to deactivate this bundled user, you can type `chage -E 0 xoa-support`. To re-activate this account, you must use the `chage -E 1 xoa-support`.

### Firewall

12
package.json
@@ -8,22 +8,22 @@
|
||||
"benchmark": "^2.1.4",
|
||||
"eslint": "^6.0.1",
|
||||
"eslint-config-prettier": "^6.0.0",
|
||||
"eslint-config-standard": "14.1.0",
|
||||
"eslint-config-standard-jsx": "^8.1.0",
|
||||
"eslint-config-standard": "12.0.0",
|
||||
"eslint-config-standard-jsx": "^6.0.2",
|
||||
"eslint-plugin-eslint-comments": "^3.1.1",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-node": "^10.0.0",
|
||||
"eslint-plugin-node": "^9.0.1",
|
||||
"eslint-plugin-promise": "^4.0.0",
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.112.0",
|
||||
"flow-bin": "^0.102.0",
|
||||
"globby": "^10.0.0",
|
||||
"husky": "^3.0.0",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"sorted-object": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
@@ -46,7 +46,6 @@
|
||||
"/xo-web/"
|
||||
],
|
||||
"testRegex": "\\.spec\\.js$",
|
||||
"timers": "fake",
|
||||
"transform": {
|
||||
"\\.jsx?$": "babel-jest"
|
||||
}
|
||||
@@ -60,7 +59,6 @@
|
||||
"posttest": "scripts/run-script test",
|
||||
"prepare": "scripts/run-script prepare",
|
||||
"pretest": "eslint --ignore-path .gitignore .",
|
||||
"prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
|
||||
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
|
||||
"test-integration": "jest \".integ\\.spec\\.js$\"",
|
||||
"travis-tests": "scripts/travis-tests"
|
||||
|
||||
@@ -35,8 +35,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.1",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -33,8 +33,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -24,26 +24,26 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.10.2",
|
||||
"cli-progress": "^3.1.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"cli-progress": "^2.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
"struct-fu": "^1.2.0",
|
||||
"vhd-lib": "^0.7.2"
|
||||
"vhd-lib": "^0.7.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"execa": "^3.2.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^2.0.2",
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"rimraf": "^3.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"rimraf": "^2.6.1",
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vhd-lib",
|
||||
"version": "0.7.2",
|
||||
"version": "0.7.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Primitives for VHD file handling",
|
||||
"keywords": [],
|
||||
@@ -18,17 +18,15 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
@@ -37,15 +35,15 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.10.2",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"execa": "^3.2.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^2.0.2",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"rimraf": "^3.0.0",
|
||||
"rimraf": "^2.6.2",
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -17,7 +17,10 @@ export default async function readChunk(stream, n) {
|
||||
resolve(Buffer.concat(chunks, i))
|
||||
}
|
||||
|
||||
const onEnd = resolve2
|
||||
function onEnd() {
|
||||
resolve2()
|
||||
clean()
|
||||
}
|
||||
|
||||
function onError(error) {
|
||||
reject(error)
|
||||
@@ -31,11 +34,8 @@ export default async function readChunk(stream, n) {
|
||||
}
|
||||
i += chunk.length
|
||||
chunks.push(chunk)
|
||||
|
||||
if (i === n) {
|
||||
if (i >= n) {
|
||||
resolve2()
|
||||
} else if (i > n) {
|
||||
throw new RangeError(`read (${i}) more than expected (${n})`)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -29,13 +29,13 @@ export default asyncIteratorToStream(async function*(size, blockParser) {
|
||||
|
||||
let next
|
||||
while ((next = await blockParser.next()) !== null) {
|
||||
const paddingLength = next.logicalAddressBytes - position
|
||||
const paddingLength = next.offsetBytes - position
|
||||
if (paddingLength < 0) {
|
||||
throw new Error('Received out of order blocks')
|
||||
}
|
||||
yield* filePadding(paddingLength)
|
||||
yield next.data
|
||||
position = next.logicalAddressBytes + next.data.length
|
||||
position = next.offsetBytes + next.data.length
|
||||
}
|
||||
yield* filePadding(actualSize - position)
|
||||
yield footer
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import assert from 'assert'
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { forEachRight } from 'lodash'
|
||||
|
||||
import computeGeometryForSize from './_computeGeometryForSize'
|
||||
import { createFooter, createHeader } from './_createFooterHeader'
|
||||
@@ -18,65 +17,38 @@ import { set as setBitmap } from './_bitmap'
|
||||
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
|
||||
|
||||
/**
|
||||
* Looks once backwards to collect the last fragment of each VHD block (they could be interleaved),
|
||||
* then allocates the blocks in a forwards pass.
|
||||
* @returns currentVhdPositionSector the first free sector after the data
|
||||
*/
|
||||
function createBAT(
|
||||
firstBlockPosition,
|
||||
fragmentLogicAddressList,
|
||||
blockAddressList,
|
||||
ratio,
|
||||
bat,
|
||||
bitmapSize
|
||||
) {
|
||||
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
|
||||
const lastFragmentPerBlock = new Map()
|
||||
forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
|
||||
assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
|
||||
const vhdTableIndex = Math.floor(
|
||||
fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
|
||||
)
|
||||
if (!lastFragmentPerBlock.has(vhdTableIndex)) {
|
||||
lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
|
||||
blockAddressList.forEach(blockPosition => {
|
||||
assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
|
||||
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
|
||||
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
|
||||
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
|
||||
currentVhdPositionSector +=
|
||||
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
|
||||
}
|
||||
})
|
||||
const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
|
||||
// lastFragmentPerBlock is from last to first, so we go the other way around
|
||||
forEachRight(
|
||||
lastFragmentPerBlockArray,
|
||||
([vhdTableIndex, _fragmentVirtualAddress]) => {
|
||||
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
|
||||
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
|
||||
currentVhdPositionSector +=
|
||||
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
|
||||
}
|
||||
}
|
||||
)
|
||||
return [currentVhdPositionSector, lastFragmentPerBlock]
|
||||
return currentVhdPositionSector
|
||||
}
|
||||
|
||||
/**
|
||||
* Receives an iterator of constant sized fragments, and a list of their address in virtual space, and returns
|
||||
* a stream representing the VHD file of this disk.
|
||||
* The fragment size should be an integer divider of the VHD block size.
|
||||
* "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
|
||||
* @param diskSize
|
||||
* @param fragmentSize
|
||||
* @param fragmentLogicalAddressList
|
||||
* @param fragmentIterator
|
||||
* @returns {Promise<Function>}
|
||||
*/
|
||||
|
||||
export default async function createReadableStream(
|
||||
diskSize,
|
||||
fragmentSize,
|
||||
fragmentLogicalAddressList,
|
||||
fragmentIterator
|
||||
incomingBlockSize,
|
||||
blockAddressList,
|
||||
blockIterator
|
||||
) {
|
||||
const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
|
||||
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
|
||||
if (ratio % 1 !== 0) {
|
||||
throw new Error(
|
||||
`Can't import file, grain size (${fragmentSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
|
||||
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
|
||||
)
|
||||
}
|
||||
if (ratio > 53) {
|
||||
@@ -108,72 +80,60 @@ export default async function createReadableStream(
|
||||
const bitmapSize =
|
||||
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
|
||||
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
|
||||
const [endOfData, lastFragmentPerBlock] = createBAT(
|
||||
const endOfData = createBAT(
|
||||
firstBlockPosition,
|
||||
fragmentLogicalAddressList,
|
||||
blockAddressList,
|
||||
ratio,
|
||||
bat,
|
||||
bitmapSize
|
||||
)
|
||||
const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
|
||||
let position = 0
|
||||
|
||||
function* yieldAndTrack(buffer, expectedPosition, reason) {
|
||||
function* yieldAndTrack(buffer, expectedPosition) {
|
||||
if (expectedPosition !== undefined) {
|
||||
assert.strictEqual(position, expectedPosition, reason)
|
||||
assert.strictEqual(position, expectedPosition)
|
||||
}
|
||||
if (buffer.length > 0) {
|
||||
yield buffer
|
||||
position += buffer.length
|
||||
}
|
||||
}
|
||||
|
||||
function insertFragmentInBlock(fragment, blockWithBitmap) {
|
||||
const fragmentOffsetInBlock =
|
||||
(fragment.logicalAddressBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
|
||||
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
|
||||
setBitmap(blockWithBitmap, fragmentOffsetInBlock + bitPos)
|
||||
}
|
||||
fragment.data.copy(
|
||||
blockWithBitmap,
|
||||
bitmapSize + (fragment.logicalAddressBytes % VHD_BLOCK_SIZE_BYTES)
|
||||
)
|
||||
}
|
||||
|
||||
async function* generateBlocks(fragmentIterator, bitmapSize) {
|
||||
let currentFragmentIndex = -1
|
||||
// store blocks waiting for some of their fragments.
|
||||
const batIndexToBlockMap = new Map()
|
||||
for await (const fragment of fragmentIterator) {
|
||||
currentFragmentIndex++
|
||||
const batIndex = Math.floor(
|
||||
fragment.logicalAddressBytes / VHD_BLOCK_SIZE_BYTES
|
||||
)
|
||||
let currentBlockWithBitmap = batIndexToBlockMap.get(batIndex)
|
||||
if (currentBlockWithBitmap === undefined) {
|
||||
async function* generateFileContent(blockIterator, bitmapSize, ratio) {
|
||||
let currentBlock = -1
|
||||
let currentVhdBlockIndex = -1
|
||||
let currentBlockWithBitmap = Buffer.alloc(0)
|
||||
for await (const next of blockIterator) {
|
||||
currentBlock++
|
||||
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
|
||||
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
|
||||
if (batIndex !== currentVhdBlockIndex) {
|
||||
if (currentVhdBlockIndex >= 0) {
|
||||
yield* yieldAndTrack(
|
||||
currentBlockWithBitmap,
|
||||
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
|
||||
)
|
||||
}
|
||||
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
|
||||
batIndexToBlockMap.set(batIndex, currentBlockWithBitmap)
|
||||
currentVhdBlockIndex = batIndex
|
||||
}
|
||||
insertFragmentInBlock(fragment, currentBlockWithBitmap)
|
||||
const batEntry = bat.readUInt32BE(batIndex * 4)
|
||||
assert.notStrictEqual(batEntry, BLOCK_UNUSED)
|
||||
const batPosition = batEntry * SECTOR_SIZE
|
||||
if (lastFragmentPerBlock.get(batIndex) === fragment.logicalAddressBytes) {
|
||||
batIndexToBlockMap.delete(batIndex)
|
||||
yield* yieldAndTrack(
|
||||
currentBlockWithBitmap,
|
||||
batPosition,
|
||||
`VHD block start index: ${currentFragmentIndex}`
|
||||
)
|
||||
const blockOffset =
|
||||
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
|
||||
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
|
||||
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
|
||||
}
|
||||
next.data.copy(
|
||||
currentBlockWithBitmap,
|
||||
bitmapSize + (next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
|
||||
)
|
||||
}
|
||||
yield* yieldAndTrack(currentBlockWithBitmap)
|
||||
}
|
||||
|
||||
async function* iterator() {
|
||||
yield* yieldAndTrack(footer, 0)
|
||||
yield* yieldAndTrack(header, FOOTER_SIZE)
|
||||
yield* yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
|
||||
yield* generateBlocks(fragmentIterator, bitmapSize)
|
||||
yield* generateFileContent(blockIterator, bitmapSize, ratio)
|
||||
yield* yieldAndTrack(footer)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import resolveRelativeFromFile from './_resolveRelativeFromFile'
|
||||
|
||||
@@ -14,23 +13,18 @@ import {
|
||||
import { fuFooter, fuHeader, checksumStruct } from './_structs'
|
||||
import { test as mapTestBit } from './_bitmap'
|
||||
|
||||
const { warn } = createLogger('vhd-lib:createSyntheticStream')
|
||||
|
||||
export default async function createSyntheticStream(handler, paths) {
|
||||
export default async function createSyntheticStream(handler, path) {
|
||||
const fds = []
|
||||
const cleanup = () => {
|
||||
for (let i = 0, n = fds.length; i < n; ++i) {
|
||||
handler.closeFile(fds[i]).catch(error => {
|
||||
warn('error while closing file', {
|
||||
error,
|
||||
fd: fds[i],
|
||||
})
|
||||
console.warn('createReadStream, closeFd', i, error)
|
||||
})
|
||||
}
|
||||
}
|
||||
try {
|
||||
const vhds = []
|
||||
const open = async path => {
|
||||
while (true) {
|
||||
const fd = await handler.openFile(path, 'r')
|
||||
fds.push(fd)
|
||||
const vhd = new Vhd(handler, fd)
|
||||
@@ -38,18 +32,11 @@ export default async function createSyntheticStream(handler, paths) {
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockAllocationTable()
|
||||
|
||||
return vhd
|
||||
}
|
||||
if (typeof paths === 'string') {
|
||||
let path = paths
|
||||
let vhd
|
||||
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
} else {
|
||||
for (const path of paths) {
|
||||
await open(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
|
||||
break
|
||||
}
|
||||
|
||||
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
|
||||
}
|
||||
const nVhds = vhds.length
|
||||
|
||||
|
||||
@@ -6,8 +6,11 @@ export { default as chainVhd } from './chain'
|
||||
export { default as checkVhdChain } from './checkChain'
|
||||
export { default as createContentStream } from './createContentStream'
|
||||
export { default as createReadableRawStream } from './createReadableRawStream'
|
||||
export { default as createReadableSparseStream } from './createReadableSparseStream'
|
||||
export {
|
||||
default as createReadableSparseStream,
|
||||
} from './createReadableSparseStream'
|
||||
export { default as createSyntheticStream } from './createSyntheticStream'
|
||||
export { default as mergeVhd } from './merge'
|
||||
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
|
||||
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
|
||||
export {
|
||||
default as createVhdStreamWithLength,
|
||||
} from './createVhdStreamWithLength'
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
import readChunk from './_readChunk'
|
||||
import { FOOTER_SIZE } from './_constants'
|
||||
import { fuFooter } from './_structs'
|
||||
|
||||
export default async function peekFooterFromStream(stream) {
|
||||
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
|
||||
const footer = fuFooter.unpack(footerBuffer)
|
||||
stream.unshift(footerBuffer)
|
||||
return footer
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
import assert from 'assert'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
@@ -16,7 +15,10 @@ import {
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const { debug } = createLogger('vhd-lib:Vhd')
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
@@ -38,11 +40,9 @@ const sectorsToBytes = sectors => sectors * SECTOR_SIZE
|
||||
const assertChecksum = (name, buf, struct) => {
|
||||
const actual = unpackField(struct.fields.checksum, buf)
|
||||
const expected = checksumStruct(buf, struct)
|
||||
assert.strictEqual(
|
||||
actual,
|
||||
expected,
|
||||
`invalid ${name} checksum ${actual}, expected ${expected}`
|
||||
)
|
||||
if (actual !== expected) {
|
||||
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
|
||||
}
|
||||
}
|
||||
|
||||
// unused block as buffer containing a uint32BE
|
||||
@@ -102,7 +102,7 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Returns the first address after metadata. (In bytes)
|
||||
_getEndOfHeaders() {
|
||||
getEndOfHeaders() {
|
||||
const { header } = this
|
||||
|
||||
let end = FOOTER_SIZE + HEADER_SIZE
|
||||
@@ -127,8 +127,8 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Returns the first sector after data.
|
||||
_getEndOfData() {
|
||||
let end = Math.ceil(this._getEndOfHeaders() / SECTOR_SIZE)
|
||||
getEndOfData() {
|
||||
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
|
||||
|
||||
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
|
||||
const { maxTableEntries } = this.header
|
||||
@@ -309,8 +309,8 @@ export default class Vhd {
|
||||
|
||||
// Make a new empty block at vhd end.
|
||||
// Update block allocation table in context and in file.
|
||||
async _createBlock(blockId) {
|
||||
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
|
||||
async createBlock(blockId) {
|
||||
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
|
||||
|
||||
debug(`create block ${blockId} at ${blockAddr}`)
|
||||
|
||||
@@ -325,7 +325,7 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
// Write a bitmap at a block address.
|
||||
async _writeBlockBitmap(blockAddr, bitmap) {
|
||||
async writeBlockBitmap(blockAddr, bitmap) {
|
||||
const { bitmapSize } = this
|
||||
|
||||
if (bitmap.length !== bitmapSize) {
|
||||
@@ -342,20 +342,20 @@ export default class Vhd {
|
||||
await this._write(bitmap, sectorsToBytes(blockAddr))
|
||||
}
|
||||
|
||||
async _writeEntireBlock(block) {
|
||||
async writeEntireBlock(block) {
|
||||
let blockAddr = this._getBatEntry(block.id)
|
||||
|
||||
if (blockAddr === BLOCK_UNUSED) {
|
||||
blockAddr = await this._createBlock(block.id)
|
||||
blockAddr = await this.createBlock(block.id)
|
||||
}
|
||||
await this._write(block.buffer, sectorsToBytes(blockAddr))
|
||||
}
|
||||
|
||||
async _writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
|
||||
async writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
|
||||
let blockAddr = this._getBatEntry(block.id)
|
||||
|
||||
if (blockAddr === BLOCK_UNUSED) {
|
||||
blockAddr = await this._createBlock(block.id)
|
||||
blockAddr = await this.createBlock(block.id)
|
||||
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
|
||||
} else if (parentBitmap === undefined) {
|
||||
parentBitmap = (await this._readBlock(block.id, true)).bitmap
|
||||
@@ -364,14 +364,14 @@ export default class Vhd {
|
||||
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
|
||||
|
||||
debug(
|
||||
`_writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
|
||||
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
|
||||
)
|
||||
|
||||
for (let i = beginSectorId; i < endSectorId; ++i) {
|
||||
mapSetBit(parentBitmap, i)
|
||||
}
|
||||
|
||||
await this._writeBlockBitmap(blockAddr, parentBitmap)
|
||||
await this.writeBlockBitmap(blockAddr, parentBitmap)
|
||||
await this._write(
|
||||
block.data.slice(
|
||||
sectorsToBytes(beginSectorId),
|
||||
@@ -407,12 +407,12 @@ export default class Vhd {
|
||||
|
||||
const isFullBlock = i === 0 && endSector === sectorsPerBlock
|
||||
if (isFullBlock) {
|
||||
await this._writeEntireBlock(block)
|
||||
await this.writeEntireBlock(block)
|
||||
} else {
|
||||
if (parentBitmap === null) {
|
||||
parentBitmap = (await this._readBlock(blockId, true)).bitmap
|
||||
}
|
||||
await this._writeBlockSectors(block, i, endSector, parentBitmap)
|
||||
await this.writeBlockSectors(block, i, endSector, parentBitmap)
|
||||
}
|
||||
|
||||
i = endSector
|
||||
@@ -429,7 +429,7 @@ export default class Vhd {
|
||||
const rawFooter = fuFooter.pack(footer)
|
||||
const eof = await this._handler.getSize(this._path)
|
||||
// sometimes the file is longer than anticipated, we still need to put the footer at the end
|
||||
const offset = Math.max(this._getEndOfData(), eof - rawFooter.length)
|
||||
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
|
||||
|
||||
footer.checksum = checksumStruct(rawFooter, fuFooter)
|
||||
debug(
|
||||
@@ -500,7 +500,7 @@ export default class Vhd {
|
||||
endInBuffer
|
||||
)
|
||||
}
|
||||
await this._writeBlockSectors(
|
||||
await this.writeBlockSectors(
|
||||
{ id: currentBlock, data: inputBuffer },
|
||||
offsetInBlockSectors,
|
||||
endInBlockSectors
|
||||
@@ -509,7 +509,7 @@ export default class Vhd {
|
||||
await this.writeFooter()
|
||||
}
|
||||
|
||||
async _ensureSpaceForParentLocators(neededSectors) {
|
||||
async ensureSpaceForParentLocators(neededSectors) {
|
||||
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
|
||||
const currentSpace =
|
||||
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
|
||||
@@ -528,7 +528,7 @@ export default class Vhd {
|
||||
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
|
||||
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
|
||||
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
|
||||
const position = await this._ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
|
||||
await this._write(encodedFilename, position)
|
||||
header.parentLocatorEntry[0].platformDataSpace =
|
||||
dataSpaceSectors * SECTOR_SIZE
|
||||
|
||||
@@ -31,11 +31,11 @@ test('createFooter() does not crash', () => {
|
||||
test('ReadableRawVHDStream does not crash', async () => {
|
||||
const data = [
|
||||
{
|
||||
logicalAddressBytes: 100,
|
||||
offsetBytes: 100,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
logicalAddressBytes: 700,
|
||||
offsetBytes: 700,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -62,11 +62,11 @@ test('ReadableRawVHDStream does not crash', async () => {
|
||||
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
|
||||
const data = [
|
||||
{
|
||||
logicalAddressBytes: 700,
|
||||
offsetBytes: 700,
|
||||
data: Buffer.from('azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
logicalAddressBytes: 100,
|
||||
offsetBytes: 100,
|
||||
data: Buffer.from('gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -97,11 +97,11 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const blockSize = Math.pow(2, 16)
|
||||
const blocks = [
|
||||
{
|
||||
logicalAddressBytes: blockSize * 3,
|
||||
offsetBytes: blockSize * 3,
|
||||
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
|
||||
},
|
||||
{
|
||||
logicalAddressBytes: blockSize * 100,
|
||||
offsetBytes: blockSize * 100,
|
||||
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
|
||||
},
|
||||
]
|
||||
@@ -109,7 +109,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const stream = await createReadableSparseStream(
|
||||
fileSize,
|
||||
blockSize,
|
||||
blocks.map(b => b.logicalAddressBytes),
|
||||
blocks.map(b => b.offsetBytes),
|
||||
blocks
|
||||
)
|
||||
expect(stream.length).toEqual(4197888)
|
||||
@@ -128,7 +128,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
|
||||
const out1 = await readFile(`${tempDir}/out1.raw`)
|
||||
const expected = Buffer.alloc(fileSize)
|
||||
blocks.forEach(b => {
|
||||
b.data.copy(expected, b.logicalAddressBytes)
|
||||
b.data.copy(expected, b.offsetBytes)
|
||||
})
|
||||
await expect(out1.slice(0, expected.length)).toEqual(expected)
|
||||
})
|
||||
|
||||
@@ -36,20 +36,20 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"archy": "^1.0.0",
|
||||
"chalk": "^3.0.0",
|
||||
"chalk": "^2.3.2",
|
||||
"exec-promise": "^0.7.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.27.3"
|
||||
"xen-api": "^0.27.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.5",
|
||||
"@babel/preset-env": "^7.1.5",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.4",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -4,7 +4,6 @@ process.env.DEBUG = '*'
|
||||
|
||||
const defer = require('golike-defer').default
|
||||
const { CancelToken } = require('promise-toolbox')
|
||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
|
||||
const { createClient } = require('../')
|
||||
|
||||
@@ -33,13 +32,8 @@ defer(async ($defer, args) => {
|
||||
const { cancel, token } = CancelToken.source()
|
||||
process.on('SIGINT', cancel)
|
||||
|
||||
let input = createInputStream(args[2])
|
||||
if (!raw && input.length === undefined) {
|
||||
input = await createVhdStreamWithLength(input)
|
||||
}
|
||||
|
||||
// https://xapi-project.github.io/xen-api/snapshots.html#uploading-a-disk-or-snapshot
|
||||
await xapi.putResource(token, input, '/import_raw_vdi/', {
|
||||
await xapi.putResource(token, createInputStream(args[2]), '/import_raw_vdi/', {
|
||||
query: {
|
||||
format: raw ? 'raw' : 'vhd',
|
||||
vdi: await resolveRef(xapi, 'VDI', args[1])
|
||||
|
||||
310
packages/xen-api/examples/package-lock.json
generated
@@ -1,310 +0,0 @@
|
||||
{
|
||||
"requires": true,
|
||||
"lockfileVersion": 1,
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": {
|
||||
"version": "0.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@xen-orchestra/log/-/log-0.2.0.tgz",
|
||||
"integrity": "sha512-xNseJ/TIUdASm9uxr0zVvg8qDG+Xw6ycJy4dag+e1yl6pEr77GdPJD2R0JbE1BbZwup/Skh3TEh6L0GV+9NRdQ==",
|
||||
"requires": {
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
}
|
||||
},
|
||||
"async-iterator-to-stream": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/async-iterator-to-stream/-/async-iterator-to-stream-1.1.0.tgz",
|
||||
"integrity": "sha512-ddF3u7ipixenFJsYCKqVR9tNdkIzd2j7JVg8QarqkfUl7UTR7nhJgc1Q+3ebP/5DNFhV9Co9F47FJjGpdc0PjQ==",
|
||||
"requires": {
|
||||
"readable-stream": "^3.0.5"
|
||||
}
|
||||
},
|
||||
"core-js": {
|
||||
"version": "3.4.1",
|
||||
"resolved": "https://registry.npmjs.org/core-js/-/core-js-3.4.1.tgz",
|
||||
"integrity": "sha512-KX/dnuY/J8FtEwbnrzmAjUYgLqtk+cxM86hfG60LGiW3MmltIc2yAmDgBgEkfm0blZhUrdr1Zd84J2Y14mLxzg=="
|
||||
},
|
||||
"core-util-is": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
|
||||
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
|
||||
},
|
||||
"debug": {
|
||||
"version": "2.6.9",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
|
||||
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"event-loop-delay": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/event-loop-delay/-/event-loop-delay-1.0.0.tgz",
|
||||
"integrity": "sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==",
|
||||
"requires": {
|
||||
"napi-macros": "^1.8.2",
|
||||
"node-gyp-build": "^3.7.0"
|
||||
}
|
||||
},
|
||||
"from2": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
|
||||
"integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
|
||||
"requires": {
|
||||
"inherits": "^2.0.1",
|
||||
"readable-stream": "^2.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"readable-stream": {
|
||||
"version": "2.3.6",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
|
||||
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
|
||||
"requires": {
|
||||
"core-util-is": "~1.0.0",
|
||||
"inherits": "~2.0.3",
|
||||
"isarray": "~1.0.0",
|
||||
"process-nextick-args": "~2.0.0",
|
||||
"safe-buffer": "~5.1.1",
|
||||
"string_decoder": "~1.1.1",
|
||||
"util-deprecate": "~1.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"fs-extra": {
|
||||
"version": "8.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
|
||||
"integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
|
||||
"requires": {
|
||||
"graceful-fs": "^4.2.0",
|
||||
"jsonfile": "^4.0.0",
|
||||
"universalify": "^0.1.0"
|
||||
}
|
||||
},
|
||||
"getopts": {
|
||||
"version": "2.2.5",
|
||||
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
|
||||
"integrity": "sha512-9jb7AW5p3in+IiJWhQiZmmwkpLaR/ccTWdWQCtZM66HJcHHLegowh4q4tSD7gouUyeNvFWRavfK9GXosQHDpFA=="
|
||||
},
|
||||
"golike-defer": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
|
||||
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
|
||||
},
|
||||
"graceful-fs": {
|
||||
"version": "4.2.3",
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
|
||||
"integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
|
||||
},
|
||||
"human-format": {
|
||||
"version": "0.10.1",
|
||||
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
|
||||
"integrity": "sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg=="
|
||||
},
|
||||
"inherits": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
|
||||
},
|
||||
"isarray": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
|
||||
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
|
||||
},
|
||||
"jsonfile": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
|
||||
"integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
|
||||
"requires": {
|
||||
"graceful-fs": "^4.1.6"
|
||||
}
|
||||
},
|
||||
"limit-concurrency-decorator": {
|
||||
"version": "0.4.0",
|
||||
"resolved": "https://registry.npmjs.org/limit-concurrency-decorator/-/limit-concurrency-decorator-0.4.0.tgz",
|
||||
"integrity": "sha512-hXGTuCkYjosfHT1D7dcPKzPHSGwBtZfN0wummzDwxi5A3ZUNBB75qM8phKEjQGlQGAfYrMW/JqhbaljO3xOH0A=="
|
||||
},
|
||||
"lodash": {
|
||||
"version": "4.17.15",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
|
||||
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
|
||||
},
|
||||
"make-error": {
|
||||
"version": "1.3.5",
|
||||
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
|
||||
"integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g=="
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
|
||||
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
|
||||
},
|
||||
"napi-macros": {
|
||||
"version": "1.8.2",
|
||||
"resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-1.8.2.tgz",
|
||||
"integrity": "sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg=="
|
||||
},
|
||||
"node-gyp-build": {
|
||||
"version": "3.9.0",
|
||||
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-3.9.0.tgz",
|
||||
"integrity": "sha512-zLcTg6P4AbcHPq465ZMFNXx7XpKKJh+7kkN699NiQWisR2uWYOWNWqRHAmbnmKiL4e9aLSlmy5U7rEMUXV59+A=="
|
||||
},
|
||||
"prettier-bytes": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/prettier-bytes/-/prettier-bytes-1.0.4.tgz",
|
||||
"integrity": "sha1-mUsCqkb2mcULYle1+qp/4lV+YtY="
|
||||
},
|
||||
"process-nextick-args": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
|
||||
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
|
||||
},
|
||||
"process-top": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/process-top/-/process-top-1.0.0.tgz",
|
||||
"integrity": "sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==",
|
||||
"requires": {
|
||||
"event-loop-delay": "^1.0.0",
|
||||
"prettier-bytes": "^1.0.4"
|
||||
}
|
||||
},
|
||||
"progress-stream": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/progress-stream/-/progress-stream-2.0.0.tgz",
|
||||
"integrity": "sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=",
|
||||
"requires": {
|
||||
"speedometer": "~1.0.0",
|
||||
"through2": "~2.0.3"
|
||||
}
|
||||
},
|
||||
"promise-toolbox": {
|
||||
"version": "0.13.0",
|
||||
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.13.0.tgz",
|
||||
"integrity": "sha512-Z6u7EL9/QyY1zZqeqpEiKS7ygKwZyl0JL0ouno/en6vMliZZc4AmM0aFCrDAVxEyKqj2f3SpkW0lXEfAZsNWiQ==",
|
||||
"requires": {
|
||||
"make-error": "^1.3.2"
|
||||
}
|
||||
},
|
||||
"readable-stream": {
|
||||
"version": "3.4.0",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.4.0.tgz",
|
||||
"integrity": "sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==",
|
||||
"requires": {
|
||||
"inherits": "^2.0.3",
|
||||
"string_decoder": "^1.1.1",
|
||||
"util-deprecate": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"safe-buffer": {
|
||||
"version": "5.1.2",
|
||||
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
|
||||
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
|
||||
},
|
||||
"speedometer": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/speedometer/-/speedometer-1.0.0.tgz",
|
||||
"integrity": "sha1-zWccsGdSwivKM3Di8zREC+T8YuI="
|
||||
},
|
||||
"stream-parser": {
|
||||
"version": "0.3.1",
|
||||
"resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz",
|
||||
"integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=",
|
||||
"requires": {
|
||||
"debug": "2"
|
||||
}
|
||||
},
|
||||
"string_decoder": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
|
||||
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
|
||||
"requires": {
|
||||
"safe-buffer": "~5.1.0"
|
||||
}
|
||||
},
|
||||
"struct-fu": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/struct-fu/-/struct-fu-1.2.1.tgz",
|
||||
"integrity": "sha512-QrtfoBRe+RixlBJl852/Gu7tLLTdx3kWs3MFzY1OHNrSsYYK7aIAnzqsncYRWrKGG/QSItDmOTlELMxehw4Gjw=="
|
||||
},
|
||||
"throttle": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
|
||||
"integrity": "sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=",
|
||||
"requires": {
|
||||
"readable-stream": ">= 0.3.0",
|
||||
"stream-parser": ">= 0.0.2"
|
||||
}
|
||||
},
|
||||
"through2": {
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
|
||||
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
|
||||
"requires": {
|
||||
"readable-stream": "~2.3.6",
|
||||
"xtend": "~4.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"readable-stream": {
|
||||
"version": "2.3.6",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
|
||||
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
|
||||
"requires": {
|
||||
"core-util-is": "~1.0.0",
|
||||
"inherits": "~2.0.3",
|
||||
"isarray": "~1.0.0",
|
||||
"process-nextick-args": "~2.0.0",
|
||||
"safe-buffer": "~5.1.1",
|
||||
"string_decoder": "~1.1.1",
|
||||
"util-deprecate": "~1.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"universalify": {
|
||||
"version": "0.1.2",
|
||||
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
|
||||
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
|
||||
},
|
||||
"util-deprecate": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
|
||||
},
|
||||
"uuid": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz",
|
||||
"integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ=="
|
||||
},
|
||||
"vhd-lib": {
|
||||
"version": "0.7.1",
|
||||
"resolved": "https://registry.npmjs.org/vhd-lib/-/vhd-lib-0.7.1.tgz",
|
||||
"integrity": "sha512-TODzo7KjtNzYF/NuJjE5bPeGyXZIUzAOVJvED1dcPXr8iSnS6/U5aNdtKahBVwukEzf0/x+Cu3GMYutV4/cxsQ==",
|
||||
"requires": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"promise-toolbox": {
|
||||
"version": "0.14.0",
|
||||
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.14.0.tgz",
|
||||
"integrity": "sha512-VV5lXK4lXaPB9oBO50ope1qd0AKN8N3nK14jYvV9/qFmfZW2Px/bJjPZBniGjXcIJf6J5Y/coNgJtPHDyiUV/g==",
|
||||
"requires": {
|
||||
"make-error": "^1.3.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"xtend": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
|
||||
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,9 +5,8 @@
"human-format": "^0.10.1",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.11.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^0.7.2"
"throttle": "^1.0.3"
}
}

179
packages/xen-api/examples/yarn.lock
Normal file
@@ -0,0 +1,179 @@
|
||||
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
core-util-is@~1.0.0:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
|
||||
integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
|
||||
|
||||
debug@2:
|
||||
version "2.6.9"
|
||||
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
|
||||
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
|
||||
dependencies:
|
||||
ms "2.0.0"
|
||||
|
||||
event-loop-delay@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/event-loop-delay/-/event-loop-delay-1.0.0.tgz#5af6282549494fd0d868c499cbdd33e027978b8c"
|
||||
integrity sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==
|
||||
dependencies:
|
||||
napi-macros "^1.8.2"
|
||||
node-gyp-build "^3.7.0"
|
||||
|
||||
getopts@^2.2.3:
|
||||
version "2.2.3"
|
||||
resolved "https://registry.yarnpkg.com/getopts/-/getopts-2.2.3.tgz#11d229775e2ec2067ed8be6fcc39d9b4bf39cf7d"
|
||||
integrity sha512-viEcb8TpgeG05+Nqo5EzZ8QR0hxdyrYDp6ZSTZqe2M/h53Bk036NmqG38Vhf5RGirC/Of9Xql+v66B2gp256SQ==
|
||||
|
||||
golike-defer@^0.4.1:
|
||||
version "0.4.1"
|
||||
resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"
|
||||
|
||||
human-format@^0.10.1:
|
||||
version "0.10.1"
|
||||
resolved "https://registry.yarnpkg.com/human-format/-/human-format-0.10.1.tgz#107793f355912e256148d5b5dcf66a0230187ee9"
|
||||
integrity sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg==
|
||||
|
||||
inherits@^2.0.3, inherits@~2.0.3:
|
||||
version "2.0.3"
|
||||
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
|
||||
integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=
|
||||
|
||||
isarray@~1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
|
||||
integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=
|
||||
|
||||
make-error@^1.3.2:
|
||||
version "1.3.5"
|
||||
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.5.tgz#efe4e81f6db28cadd605c70f29c831b58ef776c8"
|
||||
integrity sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==
|
||||
|
||||
ms@2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
|
||||
integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=
|
||||
|
||||
napi-macros@^1.8.2:
|
||||
version "1.8.2"
|
||||
resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda"
|
||||
integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg==
|
||||
|
||||
node-gyp-build@^3.7.0:
|
||||
version "3.7.0"
|
||||
resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.7.0.tgz#daa77a4f547b9aed3e2aac779eaf151afd60ec8d"
|
||||
integrity sha512-L/Eg02Epx6Si2NXmedx+Okg+4UHqmaf3TNcxd50SF9NQGcJaON3AtU++kax69XV7YWz4tUspqZSAsVofhFKG2w==
|
||||
|
||||
prettier-bytes@^1.0.4:
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/prettier-bytes/-/prettier-bytes-1.0.4.tgz#994b02aa46f699c50b6257b5faaa7fe2557e62d6"
|
||||
integrity sha1-mUsCqkb2mcULYle1+qp/4lV+YtY=
|
||||
|
||||
process-nextick-args@~2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa"
|
||||
integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==
|
||||
|
||||
process-top@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/process-top/-/process-top-1.0.0.tgz#52892bedb581c5abf0df2d0aa5c429e34275cc7e"
|
||||
integrity sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==
|
||||
dependencies:
|
||||
event-loop-delay "^1.0.0"
|
||||
prettier-bytes "^1.0.4"
|
||||
|
||||
progress-stream@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/progress-stream/-/progress-stream-2.0.0.tgz#fac63a0b3d11deacbb0969abcc93b214bce19ed5"
|
||||
integrity sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=
|
||||
dependencies:
|
||||
speedometer "~1.0.0"
|
||||
through2 "~2.0.3"
|
||||
|
||||
promise-toolbox@^0.11.0:
|
||||
version "0.11.0"
|
||||
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.11.0.tgz#9ed928355355395072dace3f879879504e07d1bc"
|
||||
integrity sha512-bjHk0kq+Ke3J3zbkbbJH6kXCyQZbFHwOTrE/Et7vS0uS0tluoV+PLqU/kEyxl8aARM7v04y2wFoDo/wWAEPvjA==
|
||||
dependencies:
|
||||
make-error "^1.3.2"
|
||||
|
||||
"readable-stream@>= 0.3.0", readable-stream@^3.1.1:
|
||||
version "3.1.1"
|
||||
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.1.1.tgz#ed6bbc6c5ba58b090039ff18ce670515795aeb06"
|
||||
integrity sha512-DkN66hPyqDhnIQ6Jcsvx9bFjhw214O4poMBcIMgPVpQvNy9a0e0Uhg5SqySyDKAmUlwt8LonTBz1ezOnM8pUdA==
|
||||
dependencies:
|
||||
inherits "^2.0.3"
|
||||
string_decoder "^1.1.1"
|
||||
util-deprecate "^1.0.1"
|
||||
|
||||
readable-stream@~2.3.6:
|
||||
version "2.3.6"
|
||||
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
|
||||
integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==
|
||||
dependencies:
|
||||
core-util-is "~1.0.0"
|
||||
inherits "~2.0.3"
|
||||
isarray "~1.0.0"
|
||||
process-nextick-args "~2.0.0"
|
||||
safe-buffer "~5.1.1"
|
||||
string_decoder "~1.1.1"
|
||||
util-deprecate "~1.0.1"
|
||||
|
||||
safe-buffer@~5.1.0, safe-buffer@~5.1.1:
|
||||
version "5.1.2"
|
||||
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
|
||||
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
|
||||
|
||||
speedometer@~1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/speedometer/-/speedometer-1.0.0.tgz#cd671cb06752c22bca3370e2f334440be4fc62e2"
|
||||
integrity sha1-zWccsGdSwivKM3Di8zREC+T8YuI=
|
||||
|
||||
"stream-parser@>= 0.0.2":
|
||||
version "0.3.1"
|
||||
resolved "https://registry.yarnpkg.com/stream-parser/-/stream-parser-0.3.1.tgz#1618548694420021a1182ff0af1911c129761773"
|
||||
integrity sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=
|
||||
dependencies:
|
||||
debug "2"
|
||||
|
||||
string_decoder@^1.1.1:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.2.0.tgz#fe86e738b19544afe70469243b2a1ee9240eae8d"
|
||||
integrity sha512-6YqyX6ZWEYguAxgZzHGL7SsCeGx3V2TtOTqZz1xSTSWnqsbWwbptafNyvf/ACquZUXV3DANr5BDIwNYe1mN42w==
|
||||
dependencies:
|
||||
safe-buffer "~5.1.0"
|
||||
|
||||
string_decoder@~1.1.1:
|
||||
version "1.1.1"
|
||||
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
|
||||
integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
|
||||
dependencies:
|
||||
safe-buffer "~5.1.0"
|
||||
|
||||
throttle@^1.0.3:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/throttle/-/throttle-1.0.3.tgz#8a32e4a15f1763d997948317c5ebe3ad8a41e4b7"
|
||||
integrity sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=
|
||||
dependencies:
|
||||
readable-stream ">= 0.3.0"
|
||||
stream-parser ">= 0.0.2"
|
||||
|
||||
through2@~2.0.3:
|
||||
version "2.0.5"
|
||||
resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
|
||||
integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
|
||||
dependencies:
|
||||
readable-stream "~2.3.6"
|
||||
xtend "~4.0.1"
|
||||
|
||||
util-deprecate@^1.0.1, util-deprecate@~1.0.1:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
|
||||
integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=
|
||||
|
||||
xtend@~4.0.1:
|
||||
version "4.0.1"
|
||||
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
|
||||
integrity sha1-pcbVMr5lbiPbgg77lDofBJmNY68=
|
||||
@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.27.3",
"version": "0.27.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -46,7 +46,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -60,8 +60,8 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,4 +1,4 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/

export default url => {
const matches = URL_RE.exec(url)
@@ -6,15 +6,7 @@ export default url => {
throw new Error('invalid URL: ' + url)
}

const [
,
protocol = 'https:',
username,
password,
ipv6,
hostname = ipv6,
port,
] = matches
const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)

@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find } from 'lodash'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'

@@ -110,7 +110,7 @@ const main = async args => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
}).then(value => (isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)

@@ -4,7 +4,7 @@ import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
import { isArray, map, noop, omit } from 'lodash'
import {
cancelable,
defer,
@@ -25,6 +25,7 @@ import isReadOnlyCall from './_isReadOnlyCall'
import makeCallSetting from './_makeCallSetting'
import parseUrl from './_parseUrl'
import replaceSensitiveValues from './_replaceSensitiveValues'
import XapiError from './_XapiError'

// ===================================================================

@@ -112,7 +113,7 @@ export class Xapi extends EventEmitter {
this._watchedTypes = undefined
const { watchEvents } = opts
if (watchEvents !== false) {
if (Array.isArray(watchEvents)) {
if (isArray(watchEvents)) {
this._watchedTypes = watchEvents
}
this.watchEvents()
@@ -625,7 +626,9 @@ export class Xapi extends EventEmitter {
kindOf(result)
)
return result
} catch (error) {
} catch (e) {
const error = e instanceof Error ? e : XapiError.wrap(e)

// do not log the session ID
//
// TODO: should log at the session level to avoid logging sensitive
@@ -740,9 +743,9 @@ export class Xapi extends EventEmitter {
// the event loop in that case
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (
await this._interruptOnDisconnect(this._call('system.listMethods'))
)
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
@@ -1072,7 +1075,7 @@ export class Xapi extends EventEmitter {
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field

const value = data[field]
if (Array.isArray(value)) {
if (isArray(value)) {
if (value.length === 0 || isOpaqueRef(value[0])) {
getters[$field] = function() {
const value = this[field]

@@ -1,8 +1,6 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'

import XapiError from '../_XapiError'

import UnsupportedTransport from './_UnsupportedTransport'

// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
@@ -32,7 +30,7 @@ export default ({ allowUnauthorized, url }) => {
return response.result
}

throw XapiError.wrap(response.error)
throw response.error
},
error => {
if (error.response !== undefined) {

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import XapiError from '../_XapiError'

import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'

@@ -35,7 +33,7 @@ const parseResult = result => {
}

if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}

const value = result.Value

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import XapiError from '../_XapiError'

import prepareXmlRpcParams from './_prepareXmlRpcParams'

const logError = error => {
@@ -28,7 +26,7 @@ const parseResult = result => {
}

if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}

return result.Value

@@ -26,28 +26,28 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/polyfill": "^7.0.0",
|
||||
"bluebird": "^3.5.1",
|
||||
"chalk": "^3.0.0",
|
||||
"chalk": "^2.2.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"fs-promise": "^2.0.3",
|
||||
"http-request-plus": "^0.8.0",
|
||||
"human-format": "^0.10.0",
|
||||
"l33teral": "^3.0.3",
|
||||
"lodash": "^4.17.4",
|
||||
"micromatch": "^4.0.2",
|
||||
"micromatch": "^3.1.3",
|
||||
"mkdirp": "^0.5.1",
|
||||
"nice-pipe": "0.0.0",
|
||||
"pretty-ms": "^5.0.0",
|
||||
"pretty-ms": "^4.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"pump": "^3.0.0",
|
||||
"pw": "^0.0.4",
|
||||
"strip-indent": "^3.0.0",
|
||||
"xdg-basedir": "^4.0.0",
|
||||
"strip-indent": "^2.0.0",
|
||||
"xdg-basedir": "^3.0.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -56,8 +56,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -7,6 +7,7 @@ const promisify = require('bluebird').promisify
const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)

const assign = require('lodash/assign')
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const xdgBasedir = require('xdg-basedir')
@@ -40,7 +41,7 @@ const save = (exports.save = function(config) {

exports.set = function(data) {
return load().then(function(config) {
return save(Object.assign(config, data))
return save(assign(config, data))
})
}


@@ -17,6 +17,7 @@ const getKeys = require('lodash/keys')
const hrp = require('http-request-plus').default
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isArray = require('lodash/isArray')
const isObject = require('lodash/isObject')
const micromatch = require('micromatch')
const nicePipe = require('nice-pipe')
@@ -198,18 +199,7 @@ function main(args) {
return exports[fnName](args.slice(1))
}

return exports.call(args).catch(error => {
if (!(error != null && error.code === 10 && 'errors' in error.data)) {
throw error
}

const lines = [error.message]
const { errors } = error.data
errors.forEach(error => {
lines.push(` property ${error.property}: ${error.message}`)
})
throw lines.join('\n')
})
return exports.call(args)
}
exports = module.exports = main

@@ -297,11 +287,7 @@ async function listCommands(args) {
str.push(
name,
'=<',
type == null
? 'unknown type'
: Array.isArray(type)
? type.join('|')
: type,
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
'>'
)

@@ -386,7 +372,7 @@ async function call(args) {
printProgress
)

return fromCallback(pump, response, progress, output)
return fromCallback(cb => pump(response, progress, output, cb))
}

if (key === '$sendTo') {

@@ -34,9 +34,9 @@
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -260,10 +260,7 @@ describe('Collection', function() {
|
||||
forEach(
|
||||
{
|
||||
'add & update → add': [
|
||||
[
|
||||
['add', 'foo', 0],
|
||||
['update', 'foo', 1],
|
||||
],
|
||||
[['add', 'foo', 0], ['update', 'foo', 1]],
|
||||
{
|
||||
add: {
|
||||
foo: 1,
|
||||
@@ -271,19 +268,10 @@ describe('Collection', function() {
|
||||
},
|
||||
],
|
||||
|
||||
'add & remove → ∅': [
|
||||
[
|
||||
['add', 'foo', 0],
|
||||
['remove', 'foo'],
|
||||
],
|
||||
{},
|
||||
],
|
||||
'add & remove → ∅': [[['add', 'foo', 0], ['remove', 'foo']], {}],
|
||||
|
||||
'update & update → update': [
|
||||
[
|
||||
['update', 'bar', 1],
|
||||
['update', 'bar', 2],
|
||||
],
|
||||
[['update', 'bar', 1], ['update', 'bar', 2]],
|
||||
{
|
||||
update: {
|
||||
bar: 2,
|
||||
@@ -292,10 +280,7 @@ describe('Collection', function() {
|
||||
],
|
||||
|
||||
'update & remove → remove': [
|
||||
[
|
||||
['update', 'bar', 1],
|
||||
['remove', 'bar'],
|
||||
],
|
||||
[['update', 'bar', 1], ['remove', 'bar']],
|
||||
{
|
||||
remove: {
|
||||
bar: undefined,
|
||||
@@ -304,10 +289,7 @@ describe('Collection', function() {
|
||||
],
|
||||
|
||||
'remove & add → update': [
|
||||
[
|
||||
['remove', 'bar'],
|
||||
['add', 'bar', 0],
|
||||
],
|
||||
[['remove', 'bar'], ['add', 'bar', 0]],
|
||||
{
|
||||
update: {
|
||||
bar: 0,
|
||||
|
||||
@@ -1,4 +1,4 @@
import iteratee from 'lodash/iteratee'
import { bind, iteratee } from 'lodash'

import clearObject from './clear-object'
import isEmpty from './is-empty'
@@ -17,9 +17,9 @@ export default class Index {
this._keysToHash = Object.create(null)

// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
}

// This method is used to compute the hash under which an item must

@@ -1,4 +1,4 @@
|
||||
import iteratee from 'lodash/iteratee'
|
||||
import { bind, iteratee } from 'lodash'
|
||||
|
||||
import clearObject from './clear-object'
|
||||
import NotImplemented from './not-implemented'
|
||||
@@ -16,9 +16,9 @@ export default class UniqueIndex {
|
||||
this._keysToHash = Object.create(null)
|
||||
|
||||
// Bound versions of listeners.
|
||||
this._onAdd = this._onAdd.bind(this)
|
||||
this._onUpdate = this._onUpdate.bind(this)
|
||||
this._onRemove = this._onRemove.bind(this)
|
||||
this._onAdd = bind(this._onAdd, this)
|
||||
this._onUpdate = bind(this._onUpdate, this)
|
||||
this._onRemove = bind(this._onRemove, this)
|
||||
}
|
||||
|
||||
// This method is used to compute the hash under which an item must
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import createCallback from 'lodash/iteratee'
|
||||
import forEach from 'lodash/forEach'
|
||||
import { bind, forEach, iteratee as createCallback } from 'lodash'
|
||||
|
||||
import Collection, {
|
||||
ACTION_ADD,
|
||||
@@ -20,9 +19,9 @@ export default class View extends Collection {
|
||||
this._onAdd(this._collection.all)
|
||||
|
||||
// Bound versions of listeners.
|
||||
this._onAdd = this._onAdd.bind(this)
|
||||
this._onUpdate = this._onUpdate.bind(this)
|
||||
this._onRemove = this._onRemove.bind(this)
|
||||
this._onAdd = bind(this._onAdd, this)
|
||||
this._onUpdate = bind(this._onUpdate, this)
|
||||
this._onRemove = bind(this._onRemove, this)
|
||||
|
||||
// Register listeners.
|
||||
this._collection.on(ACTION_ADD, this._onAdd)
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { BaseError } from 'make-error'
|
||||
import { iteratee } from 'lodash'
|
||||
import { isArray, iteratee } from 'lodash'
|
||||
|
||||
class XoError extends BaseError {
|
||||
constructor({ code, message, data }) {
|
||||
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
|
||||
}))
|
||||
|
||||
export const invalidParameters = create(10, (message, errors) => {
|
||||
if (Array.isArray(message)) {
|
||||
if (isArray(message)) {
|
||||
errors = message
|
||||
message = undefined
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@
|
||||
"@types/node": "^12.0.2",
|
||||
"@types/through2": "^2.0.31",
|
||||
"tslint": "^5.9.1",
|
||||
"tslint-config-standard": "^9.0.0",
|
||||
"tslint-config-standard": "^8.0.1",
|
||||
"typescript": "^3.1.6"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -41,8 +41,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
# xo-remote-parser [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
# ${pkg.name} [](https://travis-ci.org/${pkg.shortGitHubPath})
|
||||
|
||||
> ${pkg.description}
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/xo-remote-parser):
|
||||
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
|
||||
|
||||
```
|
||||
> npm install --save xo-remote-parser
|
||||
> npm install --save ${pkg.name}
|
||||
```
|
||||
|
||||
## Usage
|
||||
@@ -40,10 +40,10 @@ the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
- report any [issue](${pkg.bugs})
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
AGPL-3.0 © [Vates SAS](https://vates.fr)
|
||||
${pkg.license} © [${pkg.author.name}](${pkg.author.url})
|
||||
|
||||
@@ -32,9 +32,9 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"deep-freeze": "^0.0.1",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-ldap",
|
||||
"version": "0.6.6",
|
||||
"version": "0.6.5",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "LDAP authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,18 +36,18 @@
|
||||
"dependencies": {
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"inquirer": "^7.0.0",
|
||||
"inquirer": "^6.0.0",
|
||||
"ldapjs": "^1.0.1",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.14.0"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/* eslint no-throw-literal: 0 */
|
||||
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import noop from 'lodash/noop'
|
||||
import { bind, noop } from 'lodash'
|
||||
import { createClient } from 'ldapjs'
|
||||
import { escape } from 'ldapjs/lib/filters/escape'
|
||||
import { promisify } from 'promise-toolbox'
|
||||
@@ -9,11 +9,6 @@ import { readFile } from 'fs'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const DEFAULTS = {
|
||||
checkCertificate: true,
|
||||
filter: '(uid={{name}})',
|
||||
}
|
||||
|
||||
const VAR_RE = /\{\{([^}]+)\}\}/g
|
||||
const evalFilter = (filter, vars) =>
|
||||
filter.replace(VAR_RE, (_, name) => {
|
||||
@@ -48,7 +43,7 @@ If not specified, it will use a default set of well-known CAs.
|
||||
description:
|
||||
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
|
||||
type: 'boolean',
|
||||
defaults: DEFAULTS.checkCertificate,
|
||||
default: true,
|
||||
},
|
||||
bind: {
|
||||
description: 'Credentials to use before looking for the user record.',
|
||||
@@ -81,11 +76,6 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
|
||||
description: `
|
||||
Filter used to find the user.
|
||||
|
||||
For LDAP if you want to filter for a special group you can try
|
||||
something like:
|
||||
|
||||
- \`(&(uid={{name}})(memberOf=<group DN>))\`
|
||||
|
||||
For Microsoft Active Directory, you can try one of the following filters:
|
||||
|
||||
- \`(cn={{name}})\`
|
||||
@@ -93,12 +83,13 @@ For Microsoft Active Directory, you can try one of the following filters:
|
||||
- \`(sAMAccountName={{name}}@<domain>)\` (replace \`<domain>\` by your own domain)
|
||||
- \`(userPrincipalName={{name}})\`
|
||||
|
||||
Or something like this if you also want to filter by group:
|
||||
For LDAP if you want to filter for a special group you can try
|
||||
something like:
|
||||
|
||||
- \`(&(sAMAccountName={{name}})(memberOf=<group DN>))\`
|
||||
- \`(&(uid={{name}})(memberOf=<group DN>))\`
|
||||
`.trim(),
|
||||
type: 'string',
|
||||
default: DEFAULTS.filter,
|
||||
default: '(uid={{name}})',
|
||||
},
|
||||
},
|
||||
required: ['uri', 'base'],
|
||||
@@ -125,7 +116,7 @@ class AuthLdap {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
|
||||
this._authenticate = this._authenticate.bind(this)
|
||||
this._authenticate = bind(this._authenticate, this)
|
||||
}
|
||||
|
||||
async configure(conf) {
|
||||
@@ -136,11 +127,7 @@ class AuthLdap {
|
||||
})
|
||||
|
||||
{
|
||||
const {
|
||||
bind,
|
||||
checkCertificate = DEFAULTS.checkCertificate,
|
||||
certificateAuthorities,
|
||||
} = conf
|
||||
const { bind, checkCertificate = true, certificateAuthorities } = conf
|
||||
|
||||
if (bind) {
|
||||
clientOpts.bindDN = bind.dn
|
||||
@@ -160,7 +147,7 @@ class AuthLdap {
|
||||
const {
|
||||
bind: credentials,
|
||||
base: searchBase,
|
||||
filter: searchFilter = DEFAULTS.filter,
|
||||
filter: searchFilter = '(uid={{name}})',
|
||||
} = conf
|
||||
|
||||
this._credentials = credentials
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
import execPromise from 'exec-promise'
|
||||
import { bind } from 'lodash'
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { readFile, writeFile } from 'fs'
|
||||
|
||||
@@ -16,7 +17,7 @@ const CACHE_FILE = './ldap.cache.conf'
|
||||
execPromise(async args => {
|
||||
const config = await promptSchema(
|
||||
configurationSchema,
|
||||
await fromCallback(readFile, CACHE_FILE, 'utf-8').then(
|
||||
await fromCallback(cb => readFile(CACHE_FILE, 'utf-8', cb)).then(
|
||||
JSON.parse,
|
||||
() => ({})
|
||||
)
|
||||
@@ -43,6 +44,6 @@ execPromise(async args => {
|
||||
}),
|
||||
password: await password('Password'),
|
||||
},
|
||||
console.log.bind(console)
|
||||
bind(console.log, console)
|
||||
)
|
||||
})
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-saml",
|
||||
"version": "0.7.0",
|
||||
"version": "0.6.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "SAML authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -40,8 +40,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -2,10 +2,6 @@ import { Strategy } from 'passport-saml'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const DEFAULTS = {
|
||||
disableRequestedAuthnContext: false,
|
||||
}
|
||||
|
||||
export const configurationSchema = {
|
||||
description:
|
||||
'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
|
||||
@@ -34,11 +30,6 @@ You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddr
|
||||
`,
|
||||
type: 'string',
|
||||
},
|
||||
disableRequestedAuthnContext: {
|
||||
title: "Don't request an authentication context",
|
||||
description: 'This is known to help when using Active Directory',
|
||||
default: DEFAULTS.disableRequestedAuthnContext,
|
||||
},
|
||||
},
|
||||
required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
|
||||
}
|
||||
@@ -55,7 +46,6 @@ class AuthSamlXoPlugin {
|
||||
configure({ usernameField, ...conf }) {
|
||||
this._usernameField = usernameField
|
||||
this._conf = {
|
||||
...DEFAULTS,
|
||||
...conf,
|
||||
|
||||
// must match the callback URL
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.16.4",
|
||||
"version": "0.16.2",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,8 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -49,8 +48,8 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -2,7 +2,6 @@ import createLogger from '@xen-orchestra/log'
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, groupBy, startCase } from 'lodash'
|
||||
import { get } from '@xen-orchestra/defined'
|
||||
import pkg from '../package'
|
||||
|
||||
const logger = createLogger('xo:xo-server-backup-reports')
|
||||
@@ -187,7 +186,7 @@ const MARKDOWN_BY_TYPE = {
|
||||
}
|
||||
|
||||
const getMarkdown = (task, props) =>
|
||||
MARKDOWN_BY_TYPE[task.data?.type]?.(task, props)
|
||||
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
|
||||
|
||||
const toMarkdown = parts => {
|
||||
const lines = []
|
||||
@@ -318,7 +317,6 @@ class BackupReportsXoPlugin {
|
||||
const taskMarkdown = await getMarkdown(task, {
|
||||
formatDate,
|
||||
jobName: log.jobName,
|
||||
xo,
|
||||
})
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
@@ -356,7 +354,7 @@ class BackupReportsXoPlugin {
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
success: log.status === 'success',
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
|
||||
@@ -366,10 +364,9 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
async _ngVmHandler(log, { name: jobName, settings }, schedule, force) {
|
||||
async _ngVmHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
|
||||
const mailReceivers = get(() => settings[''].reportRecipients)
|
||||
const { reportWhen, mode } = log.data || {}
|
||||
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
@@ -392,9 +389,8 @@ class BackupReportsXoPlugin {
|
||||
subject: `[Xen Orchestra] ${
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
mailReceivers,
|
||||
markdown: toMarkdown(markdown),
|
||||
success: false,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
})
|
||||
}
|
||||
@@ -646,12 +642,11 @@ class BackupReportsXoPlugin {
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
return this._sendReport({
|
||||
mailReceivers,
|
||||
markdown: toMarkdown(markdown),
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
success: log.status === 'success',
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
|
||||
@@ -661,18 +656,12 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
_sendReport({
|
||||
mailReceivers = this._mailsReceivers,
|
||||
markdown,
|
||||
nagiosMarkdown,
|
||||
subject,
|
||||
success,
|
||||
}) {
|
||||
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
xo.sendEmail({
|
||||
to: mailReceivers,
|
||||
to: this._mailsReceivers,
|
||||
subject,
|
||||
markdown,
|
||||
}),
|
||||
@@ -687,14 +676,9 @@ class BackupReportsXoPlugin {
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
status: success ? 0 : 2,
|
||||
status: nagiosStatus,
|
||||
message: nagiosMarkdown,
|
||||
}),
|
||||
xo.sendIcinga2Status !== undefined &&
|
||||
xo.sendIcinga2Status({
|
||||
status: success ? 'OK' : 'CRITICAL',
|
||||
message: markdown,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
@@ -724,7 +708,7 @@ class BackupReportsXoPlugin {
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
success: false,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
|
||||
})
|
||||
}
|
||||
@@ -920,7 +904,7 @@ class BackupReportsXoPlugin {
|
||||
? ICON_FAILURE
|
||||
: ICON_SKIPPED
|
||||
}`,
|
||||
success: globalSuccess,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
|
||||
@@ -1,4 +1,8 @@
# xo-server-web-hooks [](https://travis-ci.org/vatesfr/xen-orchestra)
# xo-server-cloud [](https://travis-ci.org/vatesfr/xen-orchestra)

## Install

For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage

@@ -26,7 +30,7 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
Contributions are *very* welcomed, either on the documentation or on
the code.

You may:
@@ -1,21 +1,20 @@
|
||||
{
|
||||
"name": "xo-server-web-hooks",
|
||||
"version": "0.1.0",
|
||||
"license": "AGPL-3.0",
|
||||
"name": "xo-server-cloud",
|
||||
"version": "0.2.4",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
"hooks",
|
||||
"cloud",
|
||||
"orchestra",
|
||||
"plugin",
|
||||
"web",
|
||||
"xen",
|
||||
"xen-orchestra",
|
||||
"xo-server"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-web-hooks",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-web-hooks",
|
||||
"directory": "packages/xo-server-cloud",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
@@ -30,25 +29,24 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"http-request-plus": "^0.8.0",
|
||||
"lodash": "^4.17.15"
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.7.0",
|
||||
"@babel/core": "^7.7.2",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.6.0",
|
||||
"@babel/preset-env": "^7.7.1",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.5.4"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
158
packages/xo-server-cloud/src/index.js
Normal file
@@ -0,0 +1,158 @@
|
||||
import Client, { createBackoff } from 'jsonrpc-websocket-client'
|
||||
import hrp from 'http-request-plus'
|
||||
|
||||
const WS_URL = 'ws://localhost:9001'
|
||||
const HTTP_URL = 'http://localhost:9002'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class XoServerCloud {
|
||||
constructor({ xo }) {
|
||||
this._xo = xo
|
||||
|
||||
// Defined in configure().
|
||||
this._conf = null
|
||||
this._key = null
|
||||
}
|
||||
|
||||
configure(configuration) {
|
||||
this._conf = configuration
|
||||
}
|
||||
|
||||
async load() {
|
||||
const getResourceCatalog = () => this._getCatalog()
|
||||
getResourceCatalog.description = 'Get the list of all available resources'
|
||||
getResourceCatalog.permission = 'admin'
|
||||
|
||||
const registerResource = ({ namespace }) =>
|
||||
this._registerResource(namespace)
|
||||
registerResource.description = 'Register a resource via cloud plugin'
|
||||
registerResource.params = {
|
||||
namespace: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
registerResource.permission = 'admin'
|
||||
|
||||
this._unsetApiMethods = this._xo.addApiMethods({
|
||||
cloud: {
|
||||
getResourceCatalog,
|
||||
registerResource,
|
||||
},
|
||||
})
|
||||
this._unsetRequestResource = this._xo.defineProperty(
|
||||
'requestResource',
|
||||
this._requestResource,
|
||||
this
|
||||
)
|
||||
|
||||
const updater = (this._updater = new Client(WS_URL))
|
||||
const connect = () =>
|
||||
updater.open(createBackoff()).catch(error => {
|
||||
console.error('xo-server-cloud: fail to connect to updater', error)
|
||||
|
||||
return connect()
|
||||
})
|
||||
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
|
||||
console.warn('xo-server-cloud: next attempt in %s ms', delay)
|
||||
})
|
||||
connect()
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._unsetApiMethods()
|
||||
this._unsetRequestResource()
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getCatalog() {
|
||||
const catalog = await this._updater.call('getResourceCatalog')
|
||||
|
||||
if (!catalog) {
|
||||
throw new Error('cannot get catalog')
|
||||
}
|
||||
|
||||
return catalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaces() {
|
||||
const catalog = await this._getCatalog()
|
||||
|
||||
if (!catalog._namespaces) {
|
||||
throw new Error('cannot get namespaces')
|
||||
}
|
||||
|
||||
return catalog._namespaces
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _registerResource(namespace) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (_namespace === undefined) {
|
||||
throw new Error(`${namespace} is not available`)
|
||||
}
|
||||
|
||||
if (_namespace.registered || _namespace.pending) {
|
||||
throw new Error(`already registered for ${namespace}`)
|
||||
}
|
||||
|
||||
return this._updater.call('registerResource', { namespace })
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaceCatalog(namespace) {
|
||||
const namespaceCatalog = (await this._getCatalog())[namespace]
|
||||
|
||||
if (!namespaceCatalog) {
|
||||
throw new Error(`cannot get catalog: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
return namespaceCatalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _requestResource(namespace, id, version) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (!_namespace || !_namespace.registered) {
|
||||
throw new Error(`cannot get resource: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
const { _token: token } = await this._getNamespaceCatalog(namespace)
|
||||
|
||||
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
|
||||
if (token === undefined) {
|
||||
throw new Error(`${namespace} namespace token is undefined`)
|
||||
}
|
||||
|
||||
const downloadToken = await this._updater.call('getResourceDownloadToken', {
|
||||
token,
|
||||
id,
|
||||
version,
|
||||
})
|
||||
|
||||
if (!downloadToken) {
|
||||
throw new Error('cannot get download token')
|
||||
}
|
||||
|
||||
const response = await hrp(HTTP_URL, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${downloadToken}`,
|
||||
},
|
||||
})
|
||||
|
||||
// currently needed for XenApi#putResource()
|
||||
response.length = response.headers['content-length']
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new XoServerCloud(opts)
|
||||
Some files were not shown because too many files have changed in this diff