Compare commits

4 Commits
xo-server-...xo-server-

| Author | SHA1 | Date |
|---|---|---|
| | e8f2b74b5e | |
| | 4535c81b44 | |
| | f39312e789 | |
| | 6aad769995 | |

.eslintrc.js — 11 changes

@@ -21,7 +21,7 @@ module.exports = {

overrides: [
{
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
rules: {
'no-console': 'off',
},
@@ -38,15 +38,6 @@ module.exports = {
// disabled because XAPI objects are using camel case
camelcase: ['off'],

'react/jsx-handler-names': 'off',

// disabled because not always relevant, we might reconsider in the future
//
// enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
//
// example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
'lines-between-class-members': 'off',

'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

@@ -36,8 +36,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,378 +0,0 @@
#!/usr/bin/env node

const args = process.argv.slice(2)

if (
args.length === 0 ||
/^(?:-h|--help)$/.test(args[0]) ||
args[0] !== 'clean-vms'
) {
console.log('Usage: xo-backups clean-vms [--force] xo-vm-backups/*')
// eslint-disable-next-line no-process-exit
return process.exit(1)
}

// remove `clean-vms` arg which is the only available command ATM
args.splice(0, 1)

// only act (ie delete files) if `--force` is present
const force = args[0] === '--force'
if (force) {
args.splice(0, 1)
}

// -----------------------------------------------------------------------------

const assert = require('assert')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')

const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })

// -----------------------------------------------------------------------------

const asyncMap = curryRight((iterable, fn) =>
Promise.all(
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
)
)

const filter = (...args) => thisArg => thisArg.filter(...args)

// TODO: better check?

// our heuristic is not good enough, there has been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// FIXME: the heuristic does not work if the XVA is compressed, we need to
// implement a specific test for it
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async path => {
try {
const fd = await fs.open(path, 'r')
try {
const { size } = await fs.fstat(fd)
if (size <= 1024 || size % 512 !== 0) {
return false
}

const buf = Buffer.allocUnsafe(1024)
assert.strictEqual(
await fs.read(fd, buf, 0, buf.length, size - buf.length),
buf.length
)
return buf.every(_ => _ === 0)
} finally {
fs.close(fd).catch(noop)
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidTar', path, error)
return true
}
}

const noop = Function.prototype

const readDir = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
entries[i] = `${path}/${entry}`
})

return entries
},
error => {
// a missing dir is by definition empty
if (error != null && error.code === 'ENOENT') {
return []
}
throw error
}
)

// -----------------------------------------------------------------------------

// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
async function mergeVhdChain(chain) {
assert(chain.length >= 2)

const child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()

console.warn('Unused parents of VHD', child)
chain
.slice(1)
.reverse()
.forEach(parent => {
console.warn(' ', parent)
})
force && console.warn(' merging…')
console.warn('')
if (force) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
//   handler,
//   parent,
//   handler,
//   children.length === 1
//     ? child
//     : await createSyntheticStream(handler, children)
// )
}

await Promise.all([
force && fs.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
console.warn('Unused VHD', child)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(child)
}),
])
}

const listVhds = pipe([
vmDir => vmDir + '/vdis',
readDir,
asyncMap(readDir),
flatten,
asyncMap(readDir),
flatten,
filter(_ => _.endsWith('.vhd')),
])

async function handleVm(vmDir) {
const vhds = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }

// remove broken VHDs
await asyncMap(await listVhds(vmDir), async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error(
'this script does not support multiple VHD children'
)
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
console.warn('Error while checking VHD', path)
console.warn(' ', error)
if (error != null && error.code === 'ERR_ASSERTION') {
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(path))
}
}
})

// remove VHDs with missing ancestors
{
const deletions = []

// return true if the VHD has been deleted or is missing
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}

// no longer needs to be checked
delete vhdParents[vhd]

deleteIfOrphan(parent)

if (!vhds.has(parent)) {
vhds.delete(vhd)

console.warn('Error while checking VHD', vhd)
console.warn(' missing parent', parent)
force && console.warn(' deleting…')
console.warn('')
force && deletions.push(handler.unlink(vhd))
}
}

// > A property that is deleted before it has been visited will not be
// > visited later.
// >
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
for (const child in vhdParents) {
deleteIfOrphan(child)
}

await Promise.all(deletions)
}

const [jsons, xvas] = await readDir(vmDir).then(entries => [
entries.filter(_ => _.endsWith('.json')),
new Set(entries.filter(_ => _.endsWith('.xva'))),
])

await asyncMap(xvas, async path => {
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await isValidTar(path))) {
console.warn('Potential broken XVA', path)
console.warn('')
}
})

const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)

// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await fs.readFile(json))
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve(vmDir, metadata.xva)

if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
console.warn('Error while checking backup', json)
console.warn(' missing file', linkedXva)
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
})()

// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
} else {
console.warn('Error while checking backup', json)
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
console.warn(
' %i/%i missing VHDs',
missingVhds.length,
linkedVhds.length
)
missingVhds.forEach(vhd => {
console.warn(' ', vhd)
})
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
}
})

// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }

const toCheck = new Set(unusedVhds)

const getUsedChildChainOrDelete = vhd => {
if (vhd in vhdChainsToMerge) {
const chain = vhdChainsToMerge[vhd]
delete vhdChainsToMerge[vhd]
return chain
}

if (!unusedVhds.has(vhd)) {
return [vhd]
}

// no longer needs to be checked
toCheck.delete(vhd)

const child = vhdChildren[vhd]
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
return chain
}
}

console.warn('Unused VHD', vhd)
force && console.warn(' deleting…')
console.warn('')
force && unusedVhdsDeletion.push(handler.unlink(vhd))
}

toCheck.forEach(vhd => {
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
})

Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain))
}
})
}

await Promise.all([
unusedVhdsDeletion,
asyncMap(unusedXvas, path => {
console.warn('Unused XVA', path)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(path)
}),
])
}

// -----------------------------------------------------------------------------

asyncMap(args, async vmDir => {
vmDir = resolve(vmDir)

// TODO: implement this in `xo-server`, not easy because not compatible with
// `@xen-orchestra/fs`.
const release = await lockfile.lock(vmDir)
try {
await handleVm(vmDir)
} catch (error) {
console.error('handleVm', vmDir, error)
} finally {
await release()
}
}).catch(error => console.error('main', error))

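Note (illustration, not part of the diff above): the curried `asyncMap` and `pipe` helpers are what make the `listVhds` pipeline work — `asyncMap(fn)` returns a function that maps `fn` over whatever iterable the previous stage resolved to. A minimal standalone sketch, with a hypothetical in-memory `readDir` standing in for the real one:

```js
// Minimal sketch of the curried asyncMap + pipe pattern used by `listVhds`.
// `readDir` here is a hypothetical stand-in returning fake directory entries.
const { curryRight, flatten } = require('lodash')
const { pipe } = require('promise-toolbox')

const asyncMap = curryRight((iterable, fn) =>
  Promise.all(Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn))
)

const readDir = async path => [`${path}/a`, `${path}/b`]

// each stage receives the resolved value of the previous one
const listAll = pipe([readDir, asyncMap(readDir), flatten])

listAll('vdis').then(entries => console.log(entries))
// → [ 'vdis/a/a', 'vdis/a/b', 'vdis/b/a', 'vdis/b/b' ]
```
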
@@ -1,27 +0,0 @@
{
"bin": {
"xo-backups": "index.js"
},
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/fs": "^0.10.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.14.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.7.0"
},
"engines": {
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
"repository": {
"directory": "@xen-orchestra/backups-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0"
}

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.27.2"
"xen-api": "^0.27.0"
},
"scripts": {
"postversion": "npm publish"

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.6",
"version": "1.0.3",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -46,8 +46,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -7,21 +7,7 @@ const MAX_DELAY = 2 ** 31 - 1

class Job {
constructor(schedule, fn) {
let scheduledDate
const wrapper = () => {
const now = Date.now()
if (scheduledDate > now) {
// we're early, delay
//
// no need to check _isEnabled, we're just delaying the existing timeout
//
// see https://github.com/vatesfr/xen-orchestra/issues/4625
this._timeout = setTimeout(wrapper, scheduledDate - now)
return
}

this._isRunning = true

let result
try {
result = fn()
@@ -36,36 +22,23 @@ class Job {
}
}
const scheduleNext = () => {
this._isRunning = false

if (this._isEnabled) {
const now = schedule._createDate()
scheduledDate = +next(schedule._schedule, now)
const delay = scheduledDate - now
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}
const delay = schedule._nextDelay()
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}

this._isEnabled = false
this._isRunning = false
this._scheduleNext = scheduleNext
this._timeout = undefined
}

start() {
this.stop()

this._isEnabled = true
if (!this._isRunning) {
this._scheduleNext()
}
this._scheduleNext()
}

stop() {
this._isEnabled = false
clearTimeout(this._timeout)
}
}
@@ -95,6 +68,11 @@ class Schedule {
return dates
}

_nextDelay() {
const now = this._createDate()
return next(this._schedule, now) - now
}

startJob(fn) {
const job = this.createJob(fn)
job.start()

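Note (illustration, not part of the diff): the `MAX_DELAY = 2 ** 31 - 1` chunking above works around the fact that Node's `setTimeout` delay is stored as a signed 32-bit integer, so anything larger fires (almost) immediately. A minimal standalone sketch of the same idea:

```js
// Sketch of the delay-chunking idea: wait in <= MAX_DELAY slices, then
// schedule the remainder, because setTimeout overflows past 2^31 - 1 ms.
const MAX_DELAY = 2 ** 31 - 1

function setLongTimeout(fn, delay) {
  return delay < MAX_DELAY
    ? setTimeout(fn, delay)
    : setTimeout(() => setLongTimeout(fn, delay - MAX_DELAY), MAX_DELAY)
}

// example: ~40 days is above MAX_DELAY (~24.8 days)
setLongTimeout(() => console.log('fired'), 40 * 24 * 60 * 60 * 1000)
```
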
@@ -1,62 +0,0 @@
/* eslint-env jest */

import { createSchedule } from './'

describe('issues', () => {
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise

const job = createSchedule('* * * * *').createJob(() => {
++nCalls

// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})

job.start()
jest.runAllTimers()

expect(nCalls).toBe(1)

job.stop()

resolve()
await promise

jest.runAllTimers()
expect(nCalls).toBe(1)
})

test('stop then start during async job execution', async () => {
let nCalls = 0
let resolve, promise

const job = createSchedule('* * * * *').createJob(() => {
++nCalls

// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})

job.start()
jest.runAllTimers()

expect(nCalls).toBe(1)

job.stop()
job.start()

resolve()
await promise

jest.runAllTimers()
expect(nCalls).toBe(2)
})
})

@@ -34,8 +34,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -33,8 +33,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.10.1",
"version": "0.10.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -30,7 +30,7 @@
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
@@ -46,10 +46,10 @@
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/log",
"version": "0.2.0",
"version": "0.1.4",
"license": "ISC",
"description": "",
"keywords": [],
@@ -31,16 +31,16 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -48,7 +48,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

@@ -19,8 +19,7 @@ const createTransport = config => {
}
}

let { filter } = config
let transport = createTransport(config.transport)
let { filter, transport } = config
const level = resolve(config.level)

if (filter !== undefined) {
@@ -52,12 +51,11 @@ const symbol =
? Symbol.for('@xen-orchestra/log')
: '@@@xen-orchestra/log'

const { env } = process
global[symbol] = createTransport({
// display warnings or above, and all that are enabled via DEBUG or
// NODE_DEBUG env
filter: [env.DEBUG, env.NODE_DEBUG].filter(Boolean).join(','),
level: resolve(env.LOG_LEVEL, LEVELS.INFO),
filter: process.env.DEBUG || process.env.NODE_DEBUG,
level: LEVELS.INFO,

transport: createConsoleTransport(),
})

@@ -1,5 +1,5 @@
import createTransport from './transports/console'
import LEVELS, { resolve } from './levels'
import LEVELS from './levels'

const symbol =
typeof Symbol !== 'undefined'
@@ -9,8 +9,7 @@ if (!(symbol in global)) {
// the default behavior, without requiring `configure` is to avoid
// logging anything unless it's a real error
const transport = createTransport()
const level = resolve(process.env.LOG_LEVEL, LEVELS.WARN)
global[symbol] = log => log.level >= level && transport(log)
global[symbol] = log => log.level > LEVELS.WARN && transport(log)
}

// -------------------------------------------------------------------
@@ -73,5 +72,5 @@ prototype.wrap = function(message, fn) {
}
}

export const createLogger = namespace => new Logger(namespace)
const createLogger = namespace => new Logger(namespace)
export { createLogger as default }

@@ -13,22 +13,11 @@ for (const name in LEVELS) {
NAMES[LEVELS[name]] = name
}

// resolves to the number representation of a level
//
// returns `defaultLevel` if invalid
export const resolve = (level, defaultLevel) => {
const type = typeof level
if (type === 'number') {
if (level in NAMES) {
return level
}
} else if (type === 'string') {
const nLevel = LEVELS[level.toUpperCase()]
if (nLevel !== undefined) {
return nLevel
}
export const resolve = level => {
if (typeof level === 'string') {
level = LEVELS[level.toUpperCase()]
}
return defaultLevel
return level
}

Object.freeze(LEVELS)

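Note (worked example, not part of the diff): with the new signature above, `resolve(level, defaultLevel)` accepts either a known level number or a level name, and falls back to `defaultLevel` for anything else:

```js
import LEVELS, { resolve } from './levels'

resolve('warn', LEVELS.INFO)       // → LEVELS.WARN  (names are matched case-insensitively)
resolve(LEVELS.ERROR, LEVELS.INFO) // → LEVELS.ERROR (known level numbers pass through)
resolve('bogus', LEVELS.INFO)      // → LEVELS.INFO  (anything invalid falls back to defaultLevel)
```
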
@@ -1,23 +1,26 @@
import LEVELS, { NAMES } from '../levels'

// Bind console methods (necessary for browsers)
/* eslint-disable no-console */
const debugConsole = console.log.bind(console)
const infoConsole = console.info.bind(console)
const warnConsole = console.warn.bind(console)
const errorConsole = console.error.bind(console)
/* eslint-enable no-console */

const { ERROR, INFO, WARN } = LEVELS

const consoleTransport = ({ data, level, namespace, message, time }) => {
const fn =
/* eslint-disable no-console */
level < INFO
? console.log
? debugConsole
: level < WARN
? console.info
? infoConsole
: level < ERROR
? console.warn
: console.error
/* eslint-enable no-console */
? warnConsole
: errorConsole

const args = [time.toISOString(), namespace, NAMES[level], message]
if (data != null) {
args.push(data)
}
fn.apply(console, args)
fn('%s - %s - [%s] %s', time.toISOString(), namespace, NAMES[level], message)
data != null && fn(data)
}
export default () => consoleTransport

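Note (illustration, not part of the diff): a transport is just a function receiving the `{ data, level, namespace, message, time }` record shown above, so alternative transports are easy to sketch — for example, a hypothetical JSON-lines transport:

```js
// Hypothetical transport sketch using the same log-record shape as the
// console transport above; emits one JSON object per line.
import { NAMES } from '../levels'

const jsonLinesTransport = ({ data, level, namespace, message, time }) => {
  process.stdout.write(
    JSON.stringify({
      time: time.toISOString(),
      level: NAMES[level],
      namespace,
      message,
      data,
    }) + '\n'
  )
}

export default () => jsonLinesTransport
```
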
@@ -36,8 +36,8 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-dev": "^1.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -1,62 +0,0 @@
# @xen-orchestra/template [](https://travis-ci.org/vatesfr/xen-orchestra)

## Install

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/template):

```
> npm install --save @xen-orchestra/template
```

## Usage

Create a string replacer based on a pattern and a list of rules.

```js
const myReplacer = compileTemplate('{name}_COPY_\{name}_{id}_%\%', {
'{name}': vm => vm.name_label,
'{id}': vm => vm.id,
'%': (_, i) => i
})

const newString = myReplacer({
name_label: 'foo',
id: 42,
}, 32)

newString === 'foo_COPY_{name}_42_32%' // true
```

## Development

```
# Install dependencies
> yarn

# Run the tests
> yarn test

# Continuously compile
> yarn dev

# Continuously run the tests
> yarn dev-test

# Build for production (automatically called by npm install)
> yarn build
```

## Contributions

Contributions are *very* welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.

## License

ISC © [Vates SAS](https://vates.fr)
@@ -1,19 +0,0 @@
import escapeRegExp from 'lodash/escapeRegExp'

const compareLengthDesc = (a, b) => b.length - a.length

export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules)
.sort(compareLengthDesc)
.map(escapeRegExp)
.join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>
pattern.replace(regExp, match => {
if (match[0] === '\\') {
return match.slice(1)
}
const rule = rules[match]
return typeof rule === 'function' ? rule(...params) : rule
})
}
@@ -1,14 +0,0 @@
/* eslint-env jest */
import { compileTemplate } from '.'

it("correctly replaces the template's variables", () => {
const replacer = compileTemplate(
'{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
{
'{property}': obj => obj.name,
'{constant}': 1235,
'%': (_, i) => i,
}
)
expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
})

CHANGELOG.md — 185 changes

@@ -2,195 +2,16 @@
|
||||
|
||||
## **next**
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- `xo-server` requires Node 8.
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
|
||||
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
|
||||
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
|
||||
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
|
||||
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
|
||||
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
|
||||
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
|
||||
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
|
||||
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/cron v1.0.5
|
||||
- xo-server-transport-icinga2 v0.1.0
|
||||
- xo-server-sdn-controller v0.3.1
|
||||
- xo-server v5.51.0
|
||||
- xo-web v5.51.0
|
||||
|
||||
### Dropped packages
|
||||
|
||||
- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment
|
||||
|
||||
|
||||
## **5.39.1** (2019-10-11)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
|
||||
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-web v5.50.3
|
||||
|
||||
|
||||
## **5.39.0** (2019-09-30)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
|
||||
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
|
||||
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))
|
||||
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SR/new] Clarify address formats [#4450](https://github.com/vatesfr/xen-orchestra/issues/4450) (PR [#4460](https://github.com/vatesfr/xen-orchestra/pull/4460))
|
||||
- [Backup NG/New] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4411](https://github.com/vatesfr/xen-orchestra/pull/4411))
|
||||
- [VM/disks] Don't hide disks that are attached to the same VM twice [#4400](https://github.com/vatesfr/xen-orchestra/issues/4400) (PR [#4414](https://github.com/vatesfr/xen-orchestra/pull/4414))
|
||||
- [SDN Controller] Ability to configure MTU for private networks (PR [#4491](https://github.com/vatesfr/xen-orchestra/pull/4491))
|
||||
- [VM Export] Filenames are now prefixed with datetime [#4503](https://github.com/vatesfr/xen-orchestra/issues/4503)
|
||||
- [Settings/Logs] Differenciate XS/XCP-ng errors from XO errors [#4101](https://github.com/vatesfr/xen-orchestra/issues/4101) (PR [#4385](https://github.com/vatesfr/xen-orchestra/pull/4385))
|
||||
- [Backups] Improve performance by caching logs consolidation (PR [#4541](https://github.com/vatesfr/xen-orchestra/pull/4541))
|
||||
- [New VM] Cloud Init available for all plans (PR [#4543](https://github.com/vatesfr/xen-orchestra/pull/4543))
|
||||
- [Servers] IPv6 addresses can be used [#4520](https://github.com/vatesfr/xen-orchestra/issues/4520) (PR [#4521](https://github.com/vatesfr/xen-orchestra/pull/4521)) \
|
||||
Note: They must enclosed in brackets to differentiate with the port, e.g.: `[2001:db8::7334]` or `[ 2001:db8::7334]:4343`
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [PBD] Obfuscate cifs password from device config [#4384](https://github.com/vatesfr/xen-orchestra/issues/4384) (PR [#4401](https://github.com/vatesfr/xen-orchestra/pull/4401))
|
||||
- [XOSAN] Fix "invalid parameters" error on creating a SR (PR [#4478](https://github.com/vatesfr/xen-orchestra/pull/4478))
|
||||
- [Patching] Avoid overloading XCP-ng by reducing the frequency of yum update checks [#4358](https://github.com/vatesfr/xen-orchestra/issues/4358) (PR [#4477](https://github.com/vatesfr/xen-orchestra/pull/4477))
|
||||
- [Network] Fix inability to create a bonded network (PR [#4489](https://github.com/vatesfr/xen-orchestra/pull/4489))
|
||||
- [Backup restore & Replication] Don't copy `sm_config` to new VDIs which might leads to useless coalesces [#4482](https://github.com/vatesfr/xen-orchestra/issues/4482) (PR [#4484](https://github.com/vatesfr/xen-orchestra/pull/4484))
|
||||
- [Home] Fix intermediary "no results" display showed on filtering items [#4420](https://github.com/vatesfr/xen-orchestra/issues/4420) (PR [#4456](https://github.com/vatesfr/xen-orchestra/pull/4456)
|
||||
- [Backup NG/New schedule] Properly show user errors in the form [#3831](https://github.com/vatesfr/xen-orchestra/issues/3831) (PR [#4131](https://github.com/vatesfr/xen-orchestra/pull/4131))
|
||||
- [VM/Advanced] Fix `"vm.set_domain_type" is not a function` error on switching virtualization mode (PV/HVM) [#4348](https://github.com/vatesfr/xen-orchestra/issues/4348) (PR [#4504](https://github.com/vatesfr/xen-orchestra/pull/4504))
|
||||
- [Backup NG/logs] Show warning when zstd compression is selected but not supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4375](https://github.com/vatesfr/xen-orchestra/pull/4375)
|
||||
- [Patches] Fix patches installation for CH 8.0 (PR [#4511](https://github.com/vatesfr/xen-orchestra/pull/4511))
|
||||
- [Network] Fix inability to set a network name [#4514](https://github.com/vatesfr/xen-orchestra/issues/4514) (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
|
||||
- [Backup NG] Fix race conditions that could lead to disabled jobs still running (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
|
||||
- [XOA] Remove "Updates" and "Licenses" tabs for non admin users (PR [#4526](https://github.com/vatesfr/xen-orchestra/pull/4526))
|
||||
- [New VM] Ability to escape [cloud config template](https://xen-orchestra.com/blog/xen-orchestra-5-21/#cloudconfigtemplates) variables [#4486](https://github.com/vatesfr/xen-orchestra/issues/4486) (PR [#4501](https://github.com/vatesfr/xen-orchestra/pull/4501))
|
||||
- [Backup NG] Properly log and report if job is already running [#4497](https://github.com/vatesfr/xen-orchestra/issues/4497) (PR [4534](https://github.com/vatesfr/xen-orchestra/pull/4534))
|
||||
- [Host] Fix an issue where host was wrongly reporting time inconsistency (PR [#4540](https://github.com/vatesfr/xen-orchestra/pull/4540))
|
||||
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.27.2
|
||||
- xo-server-cloud v0.3.0
|
||||
- @xen-orchestra/cron v1.0.4
|
||||
- xo-server-sdn-controller v0.3.0
|
||||
- @xen-orchestra/template v0.1.0
|
||||
- xo-server v5.50.1
|
||||
- xo-web v5.50.2
|
||||
|
||||
|
||||
## **5.38.0** (2019-08-29)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
|
||||
- [Zstd]
|
||||
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
|
||||
- [VM/Bulk copy] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4346](https://github.com/vatesfr/xen-orchestra/pull/4346))
|
||||
- [VM import & Continuous Replication] Enable `guessVhdSizeOnImport` by default, this fix some `VDI_IO_ERROR` with XenServer 7.1 and XCP-ng 8.0 (PR [#4436](https://github.com/vatesfr/xen-orchestra/pull/4436))
|
||||
- [SDN Controller] Add possibility to create multiple GRE networks and VxLAN networks within a same pool (PR [#4435](https://github.com/vatesfr/xen-orchestra/pull/4435))
|
||||
- [SDN Controller] Add possibility to create cross-pool private networks (PR [#4405](https://github.com/vatesfr/xen-orchestra/pull/4405))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
|
||||
- [VM/Attach disk] Fix checking VDI mode (PR [#4373](https://github.com/vatesfr/xen-orchestra/pull/4373))
|
||||
- [VM revert] Snapshot before: add admin ACLs on created snapshot [#4331](https://github.com/vatesfr/xen-orchestra/issues/4331) (PR [#4391](https://github.com/vatesfr/xen-orchestra/pull/4391))
|
||||
- [Network] Fixed "invalid parameters" error when creating bonded network [#4425](https://github.com/vatesfr/xen-orchestra/issues/4425) (PR [#4429](https://github.com/vatesfr/xen-orchestra/pull/4429))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller v0.2.0
|
||||
- xo-server-usage-report v0.7.3
|
||||
- xo-server v5.48.0
|
||||
- xo-web v5.48.1
|
||||
|
||||
## **5.37.1** (2019-08-06)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
|
||||
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-sdn-controller v0.1.2
|
||||
|
||||
## **5.37.0** (2019-07-25)
|
||||
|
||||
### Highlights
|
||||
|
||||
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
|
||||
- [SR/General] Improve SR usage graph [#3608](https://github.com/vatesfr/xen-orchestra/issues/3608) (PR [#3830](https://github.com/vatesfr/xen-orchestra/pull/3830))
|
||||
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
|
||||
- [Backup NG] Ability to bypass unhealthy VDI chains check [#4324](https://github.com/vatesfr/xen-orchestra/issues/4324) (PR [#4340](https://github.com/vatesfr/xen-orchestra/pull/4340))
|
||||
- [VM/console] Multiline copy/pasting [#4261](https://github.com/vatesfr/xen-orchestra/issues/4261) (PR [#4341](https://github.com/vatesfr/xen-orchestra/pull/4341))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
|
||||
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
|
||||
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
|
||||
- [Backup NG/New] Generate default schedule if no schedule is specified [#4036](https://github.com/vatesfr/xen-orchestra/issues/4036) (PR [#4183](https://github.com/vatesfr/xen-orchestra/pull/4183))
|
||||
- [Host/Advanced] Ability to edit iSCSI IQN [#4048](https://github.com/vatesfr/xen-orchestra/issues/4048) (PR [#4208](https://github.com/vatesfr/xen-orchestra/pull/4208))
|
||||
- [VM,host] Improved state icons/pills (colors and tooltips) (PR [#4363](https://github.com/vatesfr/xen-orchestra/pull/4363))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Settings/Servers] Fix read-only setting toggling
|
||||
- [SDN Controller] Do not choose physical PIF without IP configuration for tunnels. (PR [#4319](https://github.com/vatesfr/xen-orchestra/pull/4319))
|
||||
- [Xen servers] Fix `no connection found for object` error if pool master is reinstalled [#4299](https://github.com/vatesfr/xen-orchestra/issues/4299) (PR [#4302](https://github.com/vatesfr/xen-orchestra/pull/4302))
|
||||
- [Backup-ng/restore] Display correct size for full VM backup [#4316](https://github.com/vatesfr/xen-orchestra/issues/4316) (PR [#4332](https://github.com/vatesfr/xen-orchestra/pull/4332))
|
||||
- [VM/tab-advanced] Fix CPU limits edition (PR [#4337](https://github.com/vatesfr/xen-orchestra/pull/4337))
|
||||
- [Remotes] Fix `EIO` errors due to massive parallel fs operations [#4323](https://github.com/vatesfr/xen-orchestra/issues/4323) (PR [#4330](https://github.com/vatesfr/xen-orchestra/pull/4330))
|
||||
- [VM/Advanced] Fix virtualization mode switch (PV/HVM) (PR [#4349](https://github.com/vatesfr/xen-orchestra/pull/4349))
|
||||
- [Task] fix hidden notification by search field [#3874](https://github.com/vatesfr/xen-orchestra/issues/3874) (PR [#4305](https://github.com/vatesfr/xen-orchestra/pull/4305)
|
||||
- [VM] Fail to change affinity (PR [#4361](https://github.com/vatesfr/xen-orchestra/pull/4361)
|
||||
- [VM] Number of CPUs not correctly changed on running VMs (PR [#4360](https://github.com/vatesfr/xen-orchestra/pull/4360)
|
||||
|
||||
### Released packages
|
||||
|
||||
- @xen-orchestra/fs v0.10.1
|
||||
- xo-server-sdn-controller v0.1.1
|
||||
- xen-api v0.27.1
|
||||
- xo-server v5.46.0
|
||||
- xo-web v5.46.0
|
||||
|
||||
## **5.36.0** (2019-06-27)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
|
||||
@@ -228,6 +49,8 @@
|
||||
|
||||
## **5.35.0** (2019-05-29)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
|
||||
|
||||
@@ -7,11 +7,21 @@
|
||||
|
||||
> Users must be able to say: “Nice enhancement, I'm eager to test it”
|
||||
|
||||
- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
|
||||
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
|
||||
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
|
||||
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
> Users must be able to say: “I had this issue, happy to know it's fixed”
|
||||
|
||||
- [Settings/Servers] Fix read-only setting toggling
|
||||
- [SDN Controller] Do not choose physical PIF without IP configuration for tunnels. (PR [#4319](https://github.com/vatesfr/xen-orchestra/pull/4319))
|
||||
- [Xen servers] Fix `no connection found for object` error if pool master is reinstalled [#4299](https://github.com/vatesfr/xen-orchestra/issues/4299) (PR [#4302](https://github.com/vatesfr/xen-orchestra/pull/4302))
|
||||
- [Backup-ng/restore] Display correct size for full VM backup [#4316](https://github.com/vatesfr/xen-orchestra/issues/4316) (PR [#4332](https://github.com/vatesfr/xen-orchestra/pull/4332))
|
||||
- [VM/tab-advanced] Fix CPU limits edition (PR [#4337](https://github.com/vatesfr/xen-orchestra/pull/4337))
|
||||
- [Remotes] Fix `EIO` errors due to massive parallel fs operations [#4323](https://github.com/vatesfr/xen-orchestra/issues/4323) (PR [#4330](https://github.com/vatesfr/xen-orchestra/pull/4330))
|
||||
|
||||
### Released packages
|
||||
|
||||
@@ -20,6 +30,8 @@
|
||||
>
|
||||
> Rule of thumb: add packages on top.
|
||||
|
||||
- @xen-orchestra/cron v1.0.6
|
||||
- xo-server v5.52.0
|
||||
- xo-web v5.52.0
|
||||
- @xen-orchestra/fs v0.10.0
|
||||
- xo-server-sdn-controller v0.1.1
|
||||
- xen-api v0.26.0
|
||||
- xo-server v5.45.0
|
||||
- xo-web v5.45.0
|
||||
|
||||
@@ -1,19 +1,14 @@
|
||||
### Check list
|
||||
|
||||
> Check if done.
|
||||
>
|
||||
> Strikethrough if not relevant: ~~example~~ ([doc](https://help.github.com/en/articles/basic-writing-and-formatting-syntax)).
|
||||
> Check items when done or if not relevant
|
||||
|
||||
- [ ] PR reference the relevant issue (e.g. `Fixes #007` or `See xoa-support#42`)
|
||||
- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
|
||||
- [ ] if UI changes, a screenshot has been added to the PR
|
||||
- [ ] `CHANGELOG.unreleased.md`:
|
||||
- enhancement/bug fix entry added
|
||||
- list of packages to release updated (`${name} v${new version}`)
|
||||
- [ ] documentation updated
|
||||
- `CHANGELOG.unreleased.md`:
|
||||
- [ ] enhancement/bug fix entry added
|
||||
- [ ] list of packages to release updated (`${name} v${new version}`)
|
||||
- **I have tested added/updated features** (and impacted code)
|
||||
- [ ] unit tests (e.g. [`cron/parse.spec.js`](https://github.com/vatesfr/xen-orchestra/blob/b24400b21de1ebafa1099c56bac1de5c988d9202/%40xen-orchestra/cron/src/parse.spec.js))
|
||||
- [ ] if `xo-server` API changes, the corresponding test has been added to/updated on [`xo-server-test`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test)
|
||||
- [ ] at least manual testing
|
||||
- [ ] **I have tested added/updated features** (and impacted code)
|
||||
|
||||
### Process
|
||||
|
||||
@@ -21,10 +16,3 @@
|
||||
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
|
||||
1. when you want a review, add a reviewer (and only one)
|
||||
1. if necessary, update your PR, and re- add a reviewer
|
||||
|
||||
From [_the Four Agreements_](https://en.wikipedia.org/wiki/Don_Miguel_Ruiz#The_Four_Agreements):
|
||||
|
||||
1. Be impeccable with your word.
|
||||
1. Don't take anything personally.
|
||||
1. Don't make assumptions.
|
||||
1. Always do your best.
|
||||
|
||||
@@ -13,11 +13,11 @@ It aims to be easy to use on any device supporting modern web technologies (HTML
|
||||
|
||||
## XOA quick deploy
|
||||
|
||||
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
|
||||
SSH to your XenServer, and execute the following:
|
||||
|
||||
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
|
||||
|
||||
[](https://xen-orchestra.com/#!/xoa)
|
||||
```
|
||||
bash -c "$(curl -s http://xoa.io/deploy)"
|
||||
```
|
||||
|
||||
### XOA credentials
|
||||
|
||||
|
||||
@@ -55,7 +55,6 @@
|
||||
* [Emergency Shutdown](emergency_shutdown.md)
|
||||
* [Auto scalability](auto_scalability.md)
|
||||
* [Forecaster](forecaster.md)
|
||||
* [SDN Controller](sdn_controller.md)
|
||||
* [Recipes](recipes.md)
|
||||
* [Reverse proxy](reverse_proxy.md)
|
||||
* [How to contribute?](contributing.md)
|
||||
|
||||
Binary file not shown. (Before: 41 KiB)
Binary file not shown. (Before: 99 KiB)
Binary file not shown. (Before: 85 KiB)
@@ -15,6 +15,5 @@ We've made multiple categories to help you to find what you need:
|
||||
* [Job Manager](scheduler.html)
|
||||
* [Alerts](alerts.html)
|
||||
* [Load balancing](load_balancing.html)
|
||||
* [SDN Controller](sdn_controller.html)
|
||||
|
||||

|
||||
|
||||
@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
|
||||
|
||||
```
|
||||
$ node -v
|
||||
v8.16.2
|
||||
v8.12.0
|
||||
```
|
||||
|
||||
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
|
||||
# Installation
|
||||
|
||||
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
|
||||
SSH to your XenServer/XCP-ng host and execute the following:
|
||||
|
||||

|
||||
```
|
||||
bash -c "$(curl -s http://xoa.io/deploy)"
|
||||
```
|
||||
|
||||
## [More on XOA and alternate deploy](xoa.md)
|
||||
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.
|
||||
|
||||
## [More on XOA](xoa.md)
|
||||
|
||||

|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
# SDN Controller
|
||||
|
||||
> SDN Controller is available in XOA 5.44 and higher
|
||||
|
||||
The SDN Controller enables a user to **create pool-wide and cross-pool** (since XOA 5.48.1) **private networks**.
|
||||
|
||||

|
||||
|
||||
## How does it work?
|
||||
|
||||
Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.com/blog/xo-sdn-controller/) and its [extension for cross-pool private networks](https://xen-orchestra.com/blog/devblog-3-extending-the-sdn-controller/).
|
||||
|
||||
## Usage
|
||||
|
||||
### Network creation
|
||||
|
||||
In the network creation view:
|
||||
- Select a `pool`
|
||||
- Select `Private network`
|
||||
- Select an interface on which to create the network's tunnels
|
||||
- Select the encapsulation: a choice is offered between `GRE` and `VxLAN`, if `VxLAN` is chosen, then port 4789 must be open for UDP traffic on all the network's hosts (see [the requirements](#vxlan))
|
||||
- Choose if the network should be encrypted or not (see [the requirements](#encryption) to use encryption)
|
||||
- Select other `pool`s to add them to the network if desired
|
||||
- For each added `pool`: select an interface on which to create the tunnels
|
||||
- Create the network
|
||||
- Have fun! ☺
|
||||
|
||||
***NB:***
|
||||
- All hosts in a private network must be able to reach the other hosts' management interface.
|
||||
> The term ‘management interface’ is used to indicate the IP-enabled NIC that carries the management traffic.
|
||||
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to Open vSwitch limitation.
|
||||
|
||||
### Configuration
|
||||
|
||||
Like all other xo-server plugins, it can be configured directly via
|
||||
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
|
||||
|
||||
The plugin's configuration contains:
|
||||
- `cert-dir`: The path where the plugin will look for the certificates to create SSL connections with the hosts.
|
||||
If none is provided, the plugin will create its own self-signed certificates.
|
||||
- `override-certs`: Enable to uninstall the existing SDN controller CA certificate in order to replace it with the plugin's one.
|
||||
|
||||
## Requirements
|
||||
|
||||
### VxLAN
|
||||
|
||||
- On XCP-ng prior to 7.6:
|
||||
- To be able to use `VxLAN`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
|
||||
|
||||
### Encryption
|
||||
|
||||
> Encryption is not available prior to 8.0.
|
||||
|
||||
- On XCP-ng 8.0:
|
||||
- To be able to encrypt the networks, `openvswitch-ipsec` package must be installed on all the hosts:
|
||||
- `yum install openvswitch-ipsec --enablerepo=xcp-ng-testing`
|
||||
- `systemctl enable ipsec`
|
||||
- `systemctl enable openvswitch-ipsec`
|
||||
- `systemctl start ipsec`
|
||||
- `systemctl start openvswitch-ipsec`
|
||||
@@ -110,17 +110,16 @@ $ systemctl restart xo-server
|
||||
|
||||
### Behind a transparent proxy
|
||||
|
||||
If you're behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
|
||||
If your are behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
|
||||
|
||||
Run the following commands to allow the updater to work:
|
||||
First, run the following commands:
|
||||
|
||||
```
|
||||
$ sudo -s
|
||||
$ echo NODE_TLS_REJECT_UNAUTHORIZED=0 >> /etc/xo-appliance/env
|
||||
$ npm config -g set strict-ssl=false
|
||||
$ systemctl restart xoa-updater
|
||||
```
|
||||
Now try running an update again.
|
||||
|
||||
Then, restart the updater with `systemctl restart xoa-updater`.
|
||||
|
||||
### Updating SSL self-signed certificate
|
||||
|
||||
|
||||
@@ -41,20 +41,6 @@ However, if you want to start a manual check, you can do it by clicking on the "
|
||||
|
||||

|
||||
|
||||
#### Release channel
|
||||
In Xen Orchestra, you can make a choice between two different release channels.
|
||||
|
||||
##### Stable
|
||||
The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).
|
||||
|
||||
##### Latest
|
||||
|
||||
The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once deployed in vastly varying environments, such as our user base has.
|
||||
|
||||
> To select the release channel of your choice, go to the XOA > Updates view.
|
||||
|
||||

|
||||
|
||||
#### Upgrade
|
||||
|
||||
If a new version is found, you'll have an upgrade button and its tooltip displayed:
|
||||
|
||||
18
docs/xoa.md
18
docs/xoa.md
@@ -22,36 +22,26 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R

### The quickest way

The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.

> **Note:** no data will be sent to our servers, it's running only between your browser and your host!



### Via a bash script

Alternatively, you can deploy it by connecting to your XenServer host and executing the following:
The fastest way to install Xen Orchestra is to use our appliance deploy script. You can deploy it by connecting to your XenServer host and executing the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```
**Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

> **Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.

Follow the instructions:
Now follow the instructions:

* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.

### Via download the XVA
### The alternative

Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.

After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.

## First Login

Once you have started the VM, you can access the web UI by putting the IP you configured during deployment into your web browser. If you did not configure an IP or are unsure, try one of the following methods to find it:

* Run `xe vm-list params=name-label,networks | grep -A 1 XOA` on your host
12
package.json
12
package.json
@@ -8,22 +8,22 @@
|
||||
"benchmark": "^2.1.4",
|
||||
"eslint": "^6.0.1",
|
||||
"eslint-config-prettier": "^6.0.0",
|
||||
"eslint-config-standard": "14.1.0",
|
||||
"eslint-config-standard-jsx": "^8.1.0",
|
||||
"eslint-config-standard": "12.0.0",
|
||||
"eslint-config-standard-jsx": "^6.0.2",
|
||||
"eslint-plugin-eslint-comments": "^3.1.1",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-node": "^10.0.0",
|
||||
"eslint-plugin-node": "^9.0.1",
|
||||
"eslint-plugin-promise": "^4.0.0",
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.109.0",
|
||||
"flow-bin": "^0.102.0",
|
||||
"globby": "^10.0.0",
|
||||
"husky": "^3.0.0",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"sorted-object": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
@@ -42,11 +42,9 @@
|
||||
"testEnvironment": "node",
|
||||
"testPathIgnorePatterns": [
|
||||
"/dist/",
|
||||
"/xo-server-test/",
|
||||
"/xo-web/"
|
||||
],
|
||||
"testRegex": "\\.spec\\.js$",
|
||||
"timers": "fake",
|
||||
"transform": {
|
||||
"\\.jsx?$": "babel-jest"
|
||||
}
|
||||
|
||||
@@ -35,8 +35,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.1",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -33,8 +33,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -27,8 +27,8 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"cli-progress": "^3.1.0",
|
||||
"@xen-orchestra/fs": "^0.10.0",
|
||||
"cli-progress": "^2.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
"struct-fu": "^1.2.0",
|
||||
@@ -39,11 +39,11 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^2.0.2",
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"rimraf": "^3.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"rimraf": "^2.6.1",
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -21,13 +21,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
@@ -36,15 +35,15 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.10.1",
|
||||
"@xen-orchestra/fs": "^0.10.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^2.0.2",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"rimraf": "^3.0.0",
|
||||
"rimraf": "^2.6.2",
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
|
||||
@@ -1,5 +1,4 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'

import resolveRelativeFromFile from './_resolveRelativeFromFile'

@@ -14,23 +13,18 @@ import {
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'

const { warn } = createLogger('vhd-lib:createSyntheticStream')

export default async function createSyntheticStream(handler, paths) {
export default async function createSyntheticStream(handler, path) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
console.warn('createReadStream, closeFd', i, error)
})
}
}
try {
const vhds = []
const open = async path => {
while (true) {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
@@ -38,18 +32,11 @@ export default async function createSyntheticStream(handler, paths) {
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()

return vhd
}
if (typeof paths === 'string') {
let path = paths
let vhd
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
} else {
for (const path of paths) {
await open(path)
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}

path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
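A usage sketch, not shown in this diff: it assumes `createSyntheticStream` is re-exported by `vhd-lib` and that `handler` is a remote handler such as the ones provided by `@xen-orchestra/fs`; the backup paths are placeholders. With the new signature, the second argument may be either a single child VHD path (the parent chain is then resolved via `parentUnicodeName`) or an explicit list of paths.

```js
import { createSyntheticStream } from 'vhd-lib' // assumed re-export, not visible in this hunk

async function exportSyntheticVhd(handler) {
  // Single path: ancestors are opened one by one until a dynamic VHD is reached.
  const fromChild = await createSyntheticStream(
    handler,
    'xo-vm-backups/<vm>/vdis/<id>/delta.vhd' // placeholder path
  )

  // Explicit chain: every listed VHD is opened (this sketch assumes child first,
  // ending with the dynamic ancestor).
  const fromChain = await createSyntheticStream(handler, [
    'xo-vm-backups/<vm>/vdis/<id>/delta.vhd',
    'xo-vm-backups/<vm>/vdis/<id>/full.vhd',
  ])

  return { fromChild, fromChain }
}
```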
@@ -1,5 +1,4 @@
|
||||
import assert from 'assert'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
@@ -16,7 +15,10 @@ import {
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const { debug } = createLogger('vhd-lib:Vhd')
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
|
||||
@@ -41,15 +41,15 @@
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.27.2"
|
||||
"xen-api": "^0.27.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.5",
|
||||
"@babel/preset-env": "^7.1.5",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.4",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
189
packages/xen-api/examples/package-lock.json
generated
189
packages/xen-api/examples/package-lock.json
generated
@@ -1,189 +0,0 @@
|
||||
{
|
||||
"requires": true,
|
||||
"lockfileVersion": 1,
|
||||
"dependencies": {
|
||||
"core-util-is": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
|
||||
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
|
||||
},
|
||||
"debug": {
|
||||
"version": "2.6.9",
|
||||
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
|
||||
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
|
||||
"requires": {
|
||||
"ms": "2.0.0"
|
||||
}
|
||||
},
|
||||
"event-loop-delay": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/event-loop-delay/-/event-loop-delay-1.0.0.tgz",
|
||||
"integrity": "sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==",
|
||||
"requires": {
|
||||
"napi-macros": "^1.8.2",
|
||||
"node-gyp-build": "^3.7.0"
|
||||
}
|
||||
},
|
||||
"getopts": {
|
||||
"version": "2.2.5",
|
||||
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
|
||||
"integrity": "sha512-9jb7AW5p3in+IiJWhQiZmmwkpLaR/ccTWdWQCtZM66HJcHHLegowh4q4tSD7gouUyeNvFWRavfK9GXosQHDpFA=="
|
||||
},
|
||||
"golike-defer": {
|
||||
"version": "0.4.1",
|
||||
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
|
||||
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
|
||||
},
|
||||
"human-format": {
|
||||
"version": "0.10.1",
|
||||
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
|
||||
"integrity": "sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg=="
|
||||
},
|
||||
"inherits": {
|
||||
"version": "2.0.4",
|
||||
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
|
||||
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
|
||||
},
|
||||
"isarray": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
|
||||
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
|
||||
},
|
||||
"make-error": {
|
||||
"version": "1.3.5",
|
||||
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
|
||||
"integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g=="
|
||||
},
|
||||
"ms": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
|
||||
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
|
||||
},
|
||||
"napi-macros": {
|
||||
"version": "1.8.2",
|
||||
"resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-1.8.2.tgz",
|
||||
"integrity": "sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg=="
|
||||
},
|
||||
"node-gyp-build": {
|
||||
"version": "3.9.0",
|
||||
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-3.9.0.tgz",
|
||||
"integrity": "sha512-zLcTg6P4AbcHPq465ZMFNXx7XpKKJh+7kkN699NiQWisR2uWYOWNWqRHAmbnmKiL4e9aLSlmy5U7rEMUXV59+A=="
|
||||
},
|
||||
"prettier-bytes": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/prettier-bytes/-/prettier-bytes-1.0.4.tgz",
|
||||
"integrity": "sha1-mUsCqkb2mcULYle1+qp/4lV+YtY="
|
||||
},
|
||||
"process-nextick-args": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
|
||||
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
|
||||
},
|
||||
"process-top": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/process-top/-/process-top-1.0.0.tgz",
|
||||
"integrity": "sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==",
|
||||
"requires": {
|
||||
"event-loop-delay": "^1.0.0",
|
||||
"prettier-bytes": "^1.0.4"
|
||||
}
|
||||
},
|
||||
"progress-stream": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/progress-stream/-/progress-stream-2.0.0.tgz",
|
||||
"integrity": "sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=",
|
||||
"requires": {
|
||||
"speedometer": "~1.0.0",
|
||||
"through2": "~2.0.3"
|
||||
}
|
||||
},
|
||||
"promise-toolbox": {
|
||||
"version": "0.13.0",
|
||||
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.13.0.tgz",
|
||||
"integrity": "sha512-Z6u7EL9/QyY1zZqeqpEiKS7ygKwZyl0JL0ouno/en6vMliZZc4AmM0aFCrDAVxEyKqj2f3SpkW0lXEfAZsNWiQ==",
|
||||
"requires": {
|
||||
"make-error": "^1.3.2"
|
||||
}
|
||||
},
|
||||
"readable-stream": {
|
||||
"version": "3.4.0",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.4.0.tgz",
|
||||
"integrity": "sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==",
|
||||
"requires": {
|
||||
"inherits": "^2.0.3",
|
||||
"string_decoder": "^1.1.1",
|
||||
"util-deprecate": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"safe-buffer": {
|
||||
"version": "5.1.2",
|
||||
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
|
||||
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
|
||||
},
|
||||
"speedometer": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/speedometer/-/speedometer-1.0.0.tgz",
|
||||
"integrity": "sha1-zWccsGdSwivKM3Di8zREC+T8YuI="
|
||||
},
|
||||
"stream-parser": {
|
||||
"version": "0.3.1",
|
||||
"resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz",
|
||||
"integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=",
|
||||
"requires": {
|
||||
"debug": "2"
|
||||
}
|
||||
},
|
||||
"string_decoder": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
|
||||
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
|
||||
"requires": {
|
||||
"safe-buffer": "~5.1.0"
|
||||
}
|
||||
},
|
||||
"throttle": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
|
||||
"integrity": "sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=",
|
||||
"requires": {
|
||||
"readable-stream": ">= 0.3.0",
|
||||
"stream-parser": ">= 0.0.2"
|
||||
}
|
||||
},
|
||||
"through2": {
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
|
||||
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
|
||||
"requires": {
|
||||
"readable-stream": "~2.3.6",
|
||||
"xtend": "~4.0.1"
|
||||
},
|
||||
"dependencies": {
|
||||
"readable-stream": {
|
||||
"version": "2.3.6",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
|
||||
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
|
||||
"requires": {
|
||||
"core-util-is": "~1.0.0",
|
||||
"inherits": "~2.0.3",
|
||||
"isarray": "~1.0.0",
|
||||
"process-nextick-args": "~2.0.0",
|
||||
"safe-buffer": "~5.1.1",
|
||||
"string_decoder": "~1.1.1",
|
||||
"util-deprecate": "~1.0.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"util-deprecate": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
|
||||
},
|
||||
"xtend": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
|
||||
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,7 @@
|
||||
"human-format": "^0.10.1",
|
||||
"process-top": "^1.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"promise-toolbox": "^0.11.0",
|
||||
"readable-stream": "^3.1.1",
|
||||
"throttle": "^1.0.3"
|
||||
}
|
||||
|
||||
179
packages/xen-api/examples/yarn.lock
Normal file
179
packages/xen-api/examples/yarn.lock
Normal file
@@ -0,0 +1,179 @@
|
||||
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
|
||||
# yarn lockfile v1
|
||||
|
||||
|
||||
core-util-is@~1.0.0:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
|
||||
integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
|
||||
|
||||
debug@2:
|
||||
version "2.6.9"
|
||||
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
|
||||
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
|
||||
dependencies:
|
||||
ms "2.0.0"
|
||||
|
||||
event-loop-delay@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/event-loop-delay/-/event-loop-delay-1.0.0.tgz#5af6282549494fd0d868c499cbdd33e027978b8c"
|
||||
integrity sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==
|
||||
dependencies:
|
||||
napi-macros "^1.8.2"
|
||||
node-gyp-build "^3.7.0"
|
||||
|
||||
getopts@^2.2.3:
|
||||
version "2.2.3"
|
||||
resolved "https://registry.yarnpkg.com/getopts/-/getopts-2.2.3.tgz#11d229775e2ec2067ed8be6fcc39d9b4bf39cf7d"
|
||||
integrity sha512-viEcb8TpgeG05+Nqo5EzZ8QR0hxdyrYDp6ZSTZqe2M/h53Bk036NmqG38Vhf5RGirC/Of9Xql+v66B2gp256SQ==
|
||||
|
||||
golike-defer@^0.4.1:
|
||||
version "0.4.1"
|
||||
resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"
|
||||
|
||||
human-format@^0.10.1:
|
||||
version "0.10.1"
|
||||
resolved "https://registry.yarnpkg.com/human-format/-/human-format-0.10.1.tgz#107793f355912e256148d5b5dcf66a0230187ee9"
|
||||
integrity sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg==
|
||||
|
||||
inherits@^2.0.3, inherits@~2.0.3:
|
||||
version "2.0.3"
|
||||
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
|
||||
integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=
|
||||
|
||||
isarray@~1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
|
||||
integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=
|
||||
|
||||
make-error@^1.3.2:
|
||||
version "1.3.5"
|
||||
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.5.tgz#efe4e81f6db28cadd605c70f29c831b58ef776c8"
|
||||
integrity sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==
|
||||
|
||||
ms@2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
|
||||
integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=
|
||||
|
||||
napi-macros@^1.8.2:
|
||||
version "1.8.2"
|
||||
resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda"
|
||||
integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg==
|
||||
|
||||
node-gyp-build@^3.7.0:
|
||||
version "3.7.0"
|
||||
resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.7.0.tgz#daa77a4f547b9aed3e2aac779eaf151afd60ec8d"
|
||||
integrity sha512-L/Eg02Epx6Si2NXmedx+Okg+4UHqmaf3TNcxd50SF9NQGcJaON3AtU++kax69XV7YWz4tUspqZSAsVofhFKG2w==
|
||||
|
||||
prettier-bytes@^1.0.4:
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/prettier-bytes/-/prettier-bytes-1.0.4.tgz#994b02aa46f699c50b6257b5faaa7fe2557e62d6"
|
||||
integrity sha1-mUsCqkb2mcULYle1+qp/4lV+YtY=
|
||||
|
||||
process-nextick-args@~2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa"
|
||||
integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==
|
||||
|
||||
process-top@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/process-top/-/process-top-1.0.0.tgz#52892bedb581c5abf0df2d0aa5c429e34275cc7e"
|
||||
integrity sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==
|
||||
dependencies:
|
||||
event-loop-delay "^1.0.0"
|
||||
prettier-bytes "^1.0.4"
|
||||
|
||||
progress-stream@^2.0.0:
|
||||
version "2.0.0"
|
||||
resolved "https://registry.yarnpkg.com/progress-stream/-/progress-stream-2.0.0.tgz#fac63a0b3d11deacbb0969abcc93b214bce19ed5"
|
||||
integrity sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=
|
||||
dependencies:
|
||||
speedometer "~1.0.0"
|
||||
through2 "~2.0.3"
|
||||
|
||||
promise-toolbox@^0.11.0:
|
||||
version "0.11.0"
|
||||
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.11.0.tgz#9ed928355355395072dace3f879879504e07d1bc"
|
||||
integrity sha512-bjHk0kq+Ke3J3zbkbbJH6kXCyQZbFHwOTrE/Et7vS0uS0tluoV+PLqU/kEyxl8aARM7v04y2wFoDo/wWAEPvjA==
|
||||
dependencies:
|
||||
make-error "^1.3.2"
|
||||
|
||||
"readable-stream@>= 0.3.0", readable-stream@^3.1.1:
|
||||
version "3.1.1"
|
||||
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.1.1.tgz#ed6bbc6c5ba58b090039ff18ce670515795aeb06"
|
||||
integrity sha512-DkN66hPyqDhnIQ6Jcsvx9bFjhw214O4poMBcIMgPVpQvNy9a0e0Uhg5SqySyDKAmUlwt8LonTBz1ezOnM8pUdA==
|
||||
dependencies:
|
||||
inherits "^2.0.3"
|
||||
string_decoder "^1.1.1"
|
||||
util-deprecate "^1.0.1"
|
||||
|
||||
readable-stream@~2.3.6:
|
||||
version "2.3.6"
|
||||
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
|
||||
integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==
|
||||
dependencies:
|
||||
core-util-is "~1.0.0"
|
||||
inherits "~2.0.3"
|
||||
isarray "~1.0.0"
|
||||
process-nextick-args "~2.0.0"
|
||||
safe-buffer "~5.1.1"
|
||||
string_decoder "~1.1.1"
|
||||
util-deprecate "~1.0.1"
|
||||
|
||||
safe-buffer@~5.1.0, safe-buffer@~5.1.1:
|
||||
version "5.1.2"
|
||||
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
|
||||
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
|
||||
|
||||
speedometer@~1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/speedometer/-/speedometer-1.0.0.tgz#cd671cb06752c22bca3370e2f334440be4fc62e2"
|
||||
integrity sha1-zWccsGdSwivKM3Di8zREC+T8YuI=
|
||||
|
||||
"stream-parser@>= 0.0.2":
|
||||
version "0.3.1"
|
||||
resolved "https://registry.yarnpkg.com/stream-parser/-/stream-parser-0.3.1.tgz#1618548694420021a1182ff0af1911c129761773"
|
||||
integrity sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=
|
||||
dependencies:
|
||||
debug "2"
|
||||
|
||||
string_decoder@^1.1.1:
|
||||
version "1.2.0"
|
||||
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.2.0.tgz#fe86e738b19544afe70469243b2a1ee9240eae8d"
|
||||
integrity sha512-6YqyX6ZWEYguAxgZzHGL7SsCeGx3V2TtOTqZz1xSTSWnqsbWwbptafNyvf/ACquZUXV3DANr5BDIwNYe1mN42w==
|
||||
dependencies:
|
||||
safe-buffer "~5.1.0"
|
||||
|
||||
string_decoder@~1.1.1:
|
||||
version "1.1.1"
|
||||
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
|
||||
integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
|
||||
dependencies:
|
||||
safe-buffer "~5.1.0"
|
||||
|
||||
throttle@^1.0.3:
|
||||
version "1.0.3"
|
||||
resolved "https://registry.yarnpkg.com/throttle/-/throttle-1.0.3.tgz#8a32e4a15f1763d997948317c5ebe3ad8a41e4b7"
|
||||
integrity sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=
|
||||
dependencies:
|
||||
readable-stream ">= 0.3.0"
|
||||
stream-parser ">= 0.0.2"
|
||||
|
||||
through2@~2.0.3:
|
||||
version "2.0.5"
|
||||
resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
|
||||
integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
|
||||
dependencies:
|
||||
readable-stream "~2.3.6"
|
||||
xtend "~4.0.1"
|
||||
|
||||
util-deprecate@^1.0.1, util-deprecate@~1.0.1:
|
||||
version "1.0.2"
|
||||
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
|
||||
integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=
|
||||
|
||||
xtend@~4.0.1:
|
||||
version "4.0.1"
|
||||
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
|
||||
integrity sha1-pcbVMr5lbiPbgg77lDofBJmNY68=
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.27.2",
|
||||
"version": "0.27.0",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -46,7 +46,7 @@
|
||||
"make-error": "^1.3.0",
|
||||
"minimist": "^1.2.0",
|
||||
"ms": "^2.1.1",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"pw": "0.0.4",
|
||||
"xmlrpc": "^1.3.2",
|
||||
"xo-collection": "^0.4.1"
|
||||
@@ -60,8 +60,8 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,4 +1,4 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/

export default url => {
const matches = URL_RE.exec(url)
@@ -6,15 +6,7 @@ export default url => {
throw new Error('invalid URL: ' + url)
}

const [
,
protocol = 'https:',
username,
password,
ipv6,
hostname = ipv6,
port,
] = matches
const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)
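For illustration only (not part of the diff): a minimal sketch of what the updated `URL_RE` captures. The new bracketed group handles IPv6 literals, and the `hostname = ipv6` default picks whichever of the two host groups matched; the sample URL and credentials are made up.

```js
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/

// IPv6 literals must be written between brackets.
const [
  ,
  protocol = 'https:',
  username,
  password,
  ipv6,
  hostname = ipv6, // falls back to the bracketed capture when group 5 is empty
  port,
] = URL_RE.exec('https://root:secret@[fe80::1]:443/')

console.log(protocol, username, password, hostname, port)
// → 'https:' 'root' 'secret' 'fe80::1' '443'
```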
@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
|
||||
import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find } from 'lodash'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
@@ -110,7 +110,7 @@ const main = async args => {
|
||||
asCallback.call(
|
||||
fromCallback(cb => {
|
||||
evaluate.call(repl, cmd, context, filename, cb)
|
||||
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
|
||||
}).then(value => (isArray(value) ? Promise.all(value) : value)),
|
||||
cb
|
||||
)
|
||||
})(repl.eval)
|
||||
|
||||
@@ -4,7 +4,7 @@ import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import httpRequest from 'http-request-plus'
|
||||
import { EventEmitter } from 'events'
|
||||
import { map, noop, omit } from 'lodash'
|
||||
import { isArray, map, noop, omit } from 'lodash'
|
||||
import {
|
||||
cancelable,
|
||||
defer,
|
||||
@@ -113,7 +113,7 @@ export class Xapi extends EventEmitter {
this._watchedTypes = undefined
const { watchEvents } = opts
if (watchEvents !== false) {
if (Array.isArray(watchEvents)) {
if (isArray(watchEvents)) {
this._watchedTypes = watchEvents
}
this.watchEvents()
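A hedged illustration (the connection options are hypothetical) of the `watchEvents` option handled above: `false` disables event watching entirely, an array restricts `_watchedTypes` to the listed object types, and anything else watches every type.

```js
import { Xapi } from 'xen-api' // class export shown in this hunk

const xapi = new Xapi({
  url: 'https://xcp-host.example', // hypothetical host
  auth: { user: 'root', password: 'secret' }, // hypothetical credentials
  watchEvents: ['VM', 'VM_guest_metrics'], // only these object types are watched
})
```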
@@ -1075,7 +1075,7 @@ export class Xapi extends EventEmitter {
|
||||
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
|
||||
|
||||
const value = data[field]
|
||||
if (Array.isArray(value)) {
|
||||
if (isArray(value)) {
|
||||
if (value.length === 0 || isOpaqueRef(value[0])) {
|
||||
getters[$field] = function() {
|
||||
const value = this[field]
|
||||
|
||||
@@ -38,16 +38,16 @@
|
||||
"human-format": "^0.10.0",
|
||||
"l33teral": "^3.0.3",
|
||||
"lodash": "^4.17.4",
|
||||
"micromatch": "^4.0.2",
|
||||
"micromatch": "^3.1.3",
|
||||
"mkdirp": "^0.5.1",
|
||||
"nice-pipe": "0.0.0",
|
||||
"pretty-ms": "^5.0.0",
|
||||
"pretty-ms": "^4.0.0",
|
||||
"progress-stream": "^2.0.0",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"promise-toolbox": "^0.13.0",
|
||||
"pump": "^3.0.0",
|
||||
"pw": "^0.0.4",
|
||||
"strip-indent": "^3.0.0",
|
||||
"xdg-basedir": "^4.0.0",
|
||||
"strip-indent": "^2.0.0",
|
||||
"xdg-basedir": "^3.0.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -56,8 +56,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -7,6 +7,7 @@ const promisify = require('bluebird').promisify
|
||||
const readFile = promisify(require('fs').readFile)
|
||||
const writeFile = promisify(require('fs').writeFile)
|
||||
|
||||
const assign = require('lodash/assign')
|
||||
const l33t = require('l33teral')
|
||||
const mkdirp = promisify(require('mkdirp'))
|
||||
const xdgBasedir = require('xdg-basedir')
|
||||
@@ -40,7 +41,7 @@ const save = (exports.save = function(config) {
|
||||
|
||||
exports.set = function(data) {
|
||||
return load().then(function(config) {
|
||||
return save(Object.assign(config, data))
|
||||
return save(assign(config, data))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ const getKeys = require('lodash/keys')
|
||||
const hrp = require('http-request-plus').default
|
||||
const humanFormat = require('human-format')
|
||||
const identity = require('lodash/identity')
|
||||
const isArray = require('lodash/isArray')
|
||||
const isObject = require('lodash/isObject')
|
||||
const micromatch = require('micromatch')
|
||||
const nicePipe = require('nice-pipe')
|
||||
@@ -198,18 +199,7 @@ function main(args) {
|
||||
return exports[fnName](args.slice(1))
|
||||
}
|
||||
|
||||
return exports.call(args).catch(error => {
|
||||
if (!(error != null && error.code === 10 && 'errors' in error.data)) {
|
||||
throw error
|
||||
}
|
||||
|
||||
const lines = [error.message]
|
||||
const { errors } = error.data
|
||||
errors.forEach(error => {
|
||||
lines.push(` property ${error.property}: ${error.message}`)
|
||||
})
|
||||
throw lines.join('\n')
|
||||
})
|
||||
return exports.call(args)
|
||||
}
|
||||
exports = module.exports = main
|
||||
|
||||
@@ -297,11 +287,7 @@ async function listCommands(args) {
|
||||
str.push(
|
||||
name,
|
||||
'=<',
|
||||
type == null
|
||||
? 'unknown type'
|
||||
: Array.isArray(type)
|
||||
? type.join('|')
|
||||
: type,
|
||||
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
|
||||
'>'
|
||||
)
|
||||
|
||||
|
||||
@@ -34,9 +34,9 @@
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -36,8 +36,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { BaseError } from 'make-error'
|
||||
import { iteratee } from 'lodash'
|
||||
import { isArray, iteratee } from 'lodash'
|
||||
|
||||
class XoError extends BaseError {
|
||||
constructor({ code, message, data }) {
|
||||
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
|
||||
}))
|
||||
|
||||
export const invalidParameters = create(10, (message, errors) => {
|
||||
if (Array.isArray(message)) {
|
||||
if (isArray(message)) {
|
||||
errors = message
|
||||
message = undefined
|
||||
}
|
||||
|
||||
@@ -41,8 +41,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -32,9 +32,9 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"cross-env": "^5.1.3",
|
||||
"deep-freeze": "^0.0.1",
|
||||
"rimraf": "^3.0.0"
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -36,18 +36,18 @@
|
||||
"dependencies": {
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"inquirer": "^7.0.0",
|
||||
"inquirer": "^6.0.0",
|
||||
"ldapjs": "^1.0.1",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.14.0"
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -40,8 +40,8 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-preset-env": "^1.6.1",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -36,7 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -48,8 +48,8 @@
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -354,7 +354,7 @@ class BackupReportsXoPlugin {
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
success: log.status === 'success',
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
|
||||
@@ -390,7 +390,7 @@ class BackupReportsXoPlugin {
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
success: false,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
|
||||
})
|
||||
}
|
||||
@@ -646,7 +646,7 @@ class BackupReportsXoPlugin {
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
success: log.status === 'success',
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
|
||||
@@ -656,7 +656,7 @@ class BackupReportsXoPlugin {
|
||||
})
|
||||
}
|
||||
|
||||
_sendReport({ markdown, subject, success, nagiosMarkdown }) {
|
||||
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
|
||||
const xo = this._xo
|
||||
return Promise.all([
|
||||
xo.sendEmail !== undefined &&
|
||||
@@ -676,14 +676,9 @@ class BackupReportsXoPlugin {
|
||||
}),
|
||||
xo.sendPassiveCheck !== undefined &&
|
||||
xo.sendPassiveCheck({
|
||||
status: success ? 0 : 2,
|
||||
status: nagiosStatus,
|
||||
message: nagiosMarkdown,
|
||||
}),
|
||||
xo.sendIcinga2Status !== undefined &&
|
||||
xo.sendIcinga2Status({
|
||||
status: success ? 'OK' : 'CRITICAL',
|
||||
message: markdown,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
@@ -713,7 +708,7 @@ class BackupReportsXoPlugin {
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
|
||||
markdown,
|
||||
success: false,
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
|
||||
})
|
||||
}
|
||||
@@ -909,7 +904,7 @@ class BackupReportsXoPlugin {
|
||||
? ICON_FAILURE
|
||||
: ICON_SKIPPED
|
||||
}`,
|
||||
success: globalSuccess,
|
||||
nagiosStatus: globalSuccess ? 0 : 2,
|
||||
nagiosMarkdown: globalSuccess
|
||||
? `[Xen Orchestra] [Success] Backup report for ${tag}`
|
||||
: `[Xen Orchestra] [${
|
||||
|
||||
10
packages/xo-server-cloud/.npmignore
Normal file
10
packages/xo-server-cloud/.npmignore
Normal file
@@ -0,0 +1,10 @@
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
@@ -1,6 +1,4 @@
# xo-server-transport-icinga2 [](https://travis-ci.org/vatesfr/xen-orchestra)

> xo-server plugin to send status to icinga2 server
# xo-server-cloud [](https://travis-ci.org/vatesfr/xen-orchestra)

## Install

@@ -13,13 +11,6 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs

## Development

### `Xo#sendIcinga2Status({ status, message })`

This xo method is called to send a passive check to icinga2 and change the status of a service.
It has two parameters:
- status: it's the service status in icinga2 (0: OK | 1: WARNING | 2: CRITICAL | 3: UNKNOWN).
- message: it's the status information in icinga2.
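A hypothetical call sketch (not taken from the repository) showing how the method described above could be used from an async method of another plugin holding a reference to `xo`; the status code and message are made-up values, and the guard mirrors the `xo.sendIcinga2Status !== undefined` check used by the backup-reports plugin elsewhere in this compare.

```js
// Report a CRITICAL state for the monitored service (0: OK, 1: WARNING,
// 2: CRITICAL, 3: UNKNOWN), guarding against the transport plugin being absent.
if (xo.sendIcinga2Status !== undefined) {
  await xo.sendIcinga2Status({
    status: 2,
    message: 'Backup job "nightly" failed', // free-form status information
  })
}
```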
```
# Install dependencies
> npm install
```
@@ -1,35 +1,46 @@
|
||||
{
|
||||
"name": "@xen-orchestra/template",
|
||||
"version": "0.1.0",
|
||||
"name": "xo-server-cloud",
|
||||
"version": "0.2.4",
|
||||
"license": "ISC",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/template",
|
||||
"description": "",
|
||||
"keywords": [
|
||||
"cloud",
|
||||
"orchestra",
|
||||
"plugin",
|
||||
"xen",
|
||||
"xen-orchestra",
|
||||
"xo-server"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@xen-orchestra/template",
|
||||
"directory": "packages/xo-server-cloud",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@vates.fr"
|
||||
"name": "Pierre Donias",
|
||||
"email": "pierre.donias@gmail.com"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"http-request-plus": "^0.8.0",
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.5.4"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
@@ -37,10 +48,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish --access public"
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.15"
|
||||
}
|
||||
"private": true
|
||||
}
|
||||
158
packages/xo-server-cloud/src/index.js
Normal file
158
packages/xo-server-cloud/src/index.js
Normal file
@@ -0,0 +1,158 @@
|
||||
import Client, { createBackoff } from 'jsonrpc-websocket-client'
|
||||
import hrp from 'http-request-plus'
|
||||
|
||||
const WS_URL = 'ws://localhost:9001'
|
||||
const HTTP_URL = 'http://localhost:9002'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class XoServerCloud {
|
||||
constructor({ xo }) {
|
||||
this._xo = xo
|
||||
|
||||
// Defined in configure().
|
||||
this._conf = null
|
||||
this._key = null
|
||||
}
|
||||
|
||||
configure(configuration) {
|
||||
this._conf = configuration
|
||||
}
|
||||
|
||||
async load() {
|
||||
const getResourceCatalog = () => this._getCatalog()
|
||||
getResourceCatalog.description = 'Get the list of all available resources'
|
||||
getResourceCatalog.permission = 'admin'
|
||||
|
||||
const registerResource = ({ namespace }) =>
|
||||
this._registerResource(namespace)
|
||||
registerResource.description = 'Register a resource via cloud plugin'
|
||||
registerResource.params = {
|
||||
namespace: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
registerResource.permission = 'admin'
|
||||
|
||||
this._unsetApiMethods = this._xo.addApiMethods({
|
||||
cloud: {
|
||||
getResourceCatalog,
|
||||
registerResource,
|
||||
},
|
||||
})
|
||||
this._unsetRequestResource = this._xo.defineProperty(
|
||||
'requestResource',
|
||||
this._requestResource,
|
||||
this
|
||||
)
|
||||
|
||||
const updater = (this._updater = new Client(WS_URL))
|
||||
const connect = () =>
|
||||
updater.open(createBackoff()).catch(error => {
|
||||
console.error('xo-server-cloud: fail to connect to updater', error)
|
||||
|
||||
return connect()
|
||||
})
|
||||
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
|
||||
console.warn('xo-server-cloud: next attempt in %s ms', delay)
|
||||
})
|
||||
connect()
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._unsetApiMethods()
|
||||
this._unsetRequestResource()
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getCatalog() {
|
||||
const catalog = await this._updater.call('getResourceCatalog')
|
||||
|
||||
if (!catalog) {
|
||||
throw new Error('cannot get catalog')
|
||||
}
|
||||
|
||||
return catalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaces() {
|
||||
const catalog = await this._getCatalog()
|
||||
|
||||
if (!catalog._namespaces) {
|
||||
throw new Error('cannot get namespaces')
|
||||
}
|
||||
|
||||
return catalog._namespaces
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _registerResource(namespace) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (_namespace === undefined) {
|
||||
throw new Error(`${namespace} is not available`)
|
||||
}
|
||||
|
||||
if (_namespace.registered || _namespace.pending) {
|
||||
throw new Error(`already registered for ${namespace}`)
|
||||
}
|
||||
|
||||
return this._updater.call('registerResource', { namespace })
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _getNamespaceCatalog(namespace) {
|
||||
const namespaceCatalog = (await this._getCatalog())[namespace]
|
||||
|
||||
if (!namespaceCatalog) {
|
||||
throw new Error(`cannot get catalog: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
return namespaceCatalog
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
async _requestResource(namespace, id, version) {
|
||||
const _namespace = (await this._getNamespaces())[namespace]
|
||||
|
||||
if (!_namespace || !_namespace.registered) {
|
||||
throw new Error(`cannot get resource: ${namespace} not registered`)
|
||||
}
|
||||
|
||||
const { _token: token } = await this._getNamespaceCatalog(namespace)
|
||||
|
||||
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
|
||||
if (token === undefined) {
|
||||
throw new Error(`${namespace} namespace token is undefined`)
|
||||
}
|
||||
|
||||
const downloadToken = await this._updater.call('getResourceDownloadToken', {
|
||||
token,
|
||||
id,
|
||||
version,
|
||||
})
|
||||
|
||||
if (!downloadToken) {
|
||||
throw new Error('cannot get download token')
|
||||
}
|
||||
|
||||
const response = await hrp(HTTP_URL, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${downloadToken}`,
|
||||
},
|
||||
})
|
||||
|
||||
// currently needed for XenApi#putResource()
|
||||
response.length = response.headers['content-length']
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new XoServerCloud(opts)
|
||||
@@ -31,7 +31,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/cron": "^1.0.6",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"lodash": "^4.16.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/cron": "^1.0.6",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"d3-time-format": "^2.1.1",
|
||||
"json5": "^2.0.1",
|
||||
"lodash": "^4.17.4"
|
||||
@@ -32,8 +32,8 @@
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^6.0.3",
|
||||
"rimraf": "^3.0.0"
|
||||
"cross-env": "^5.1.3",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import JSON5 from 'json5'
|
||||
import { createSchedule } from '@xen-orchestra/cron'
|
||||
import { forOwn, map, mean } from 'lodash'
|
||||
import { assign, forOwn, map, mean } from 'lodash'
|
||||
import { utcParse } from 'd3-time-format'
|
||||
|
||||
const COMPARATOR_FN = {
|
||||
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
|
||||
result.rrd = await this.getRrd(result.object, observationPeriod)
|
||||
if (result.rrd !== null) {
|
||||
const data = parseData(result.rrd, result.object.uuid)
|
||||
Object.assign(result, {
|
||||
assign(result, {
|
||||
data,
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
|
||||
definition.alarmTriggerLevel
|
||||
)
|
||||
const data = getter(result.object)
|
||||
Object.assign(result, {
|
||||
assign(result, {
|
||||
value: data.getDisplayableValue(),
|
||||
shouldAlarm: data.shouldAlarm(),
|
||||
})
|
||||
@@ -680,7 +680,7 @@ ${entry.listItem}
|
||||
},
|
||||
}
|
||||
if (xapiObject.$type === 'VM') {
|
||||
payload.vm_uuid = xapiObject.uuid
|
||||
payload['vm_uuid'] = xapiObject.uuid
|
||||
}
|
||||
// JSON is not well formed, can't use the default node parser
|
||||
return JSON5.parse(
|
||||
|
||||
@@ -1,14 +1,31 @@
# xo-server-sdn-controller [](https://travis-ci.org/vatesfr/xen-orchestra)

XO Server plugin that allows the creation of pool-wide and cross-pool private networks.
XO Server plugin that allows the creation of pool-wide private networks.

## Install

For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Documentation
## Usage

Please see the plugin's [official documentation](https://xen-orchestra.com/docs/sdn_controller.html).
### Network creation

In the network creation view, select a `pool` and `Private network`.
Create the network.

Choice is offer between `GRE` and `VxLAN`, if `VxLAN` is chosen, then the port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

The plugin's configuration contains:
- `cert-dir`: A path where to find the certificates to create SSL connections with the hosts.
  If none is provided, the plugin will create its own self-signed certificates.
- `override-certs:` Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it by the plugin's one.

## Contributions
@@ -15,24 +15,22 @@
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"version": "0.3.1",
|
||||
"version": "0.1.1",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.4.4",
|
||||
"@babel/core": "^7.4.4",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.4.4",
|
||||
"cross-env": "^6.0.3"
|
||||
"cross-env": "^5.2.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"lodash": "^4.17.11",
|
||||
"node-openssl-cert": "^0.0.98",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"uuid": "^3.3.2"
|
||||
"node-openssl-cert": "^0.0.84",
|
||||
"promise-toolbox": "^0.13.0"
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,8 +1,8 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import fromEvent from 'promise-toolbox/fromEvent'
|
||||
import { connect } from 'tls'
|
||||
import { forOwn, toPairs } from 'lodash'
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
|
||||
|
||||
@@ -10,139 +10,102 @@ const OVSDB_PORT = 6640

// =============================================================================

function toMap(object) {
return ['map', toPairs(object)]
}
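As a side note (not part of the diff): the OVSDB wire protocol encodes maps as a tagged pair, which is what the `toMap` helper above produces via lodash's `toPairs`. A quick sketch of the expected shape, using the `remote_ip` and `key` option names that appear later in this hunk (the values are made up):

```js
import { toPairs } from 'lodash'

const toMap = object => ['map', toPairs(object)]

toMap({ remote_ip: '10.0.0.2', key: '42' })
// → ['map', [['remote_ip', '10.0.0.2'], ['key', '42']]]
```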
// =============================================================================
|
||||
|
||||
export class OvsdbClient {
|
||||
/*
|
||||
Create an SSL connection to an XCP-ng host.
|
||||
Interact with the host's OpenVSwitch (OVS) daemon to create and manage the virtual bridges
|
||||
corresponding to the private networks with OVSDB (OpenVSwitch DataBase) Protocol.
|
||||
See:
|
||||
- OVSDB Protocol: https://tools.ietf.org/html/rfc7047
|
||||
- OVS Tunneling : http://docs.openvswitch.org/en/latest/howto/tunneling/
|
||||
- OVS IPSEC : http://docs.openvswitch.org/en/latest/howto/ipsec/
|
||||
|
||||
Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
|
||||
- `other_config`:
|
||||
- `xo:sdn-controller:private-network-uuid`: UUID of the private network
|
||||
|
||||
Attributes on created OVS interfaces:
|
||||
- `options`:
|
||||
- `key` : Network's VNI
|
||||
- `remote_ip`: Remote IP of the tunnel
|
||||
*/
|
||||
|
||||
constructor(host, clientKey, clientCert, caCert) {
|
||||
this._host = host
|
||||
this._numberOfPortAndInterface = 0
|
||||
this._requestId = 0
|
||||
|
||||
this._adding = []
|
||||
|
||||
this.host = host
|
||||
this._requestID = 0
|
||||
|
||||
this.updateCertificates(clientKey, clientCert, caCert)
|
||||
|
||||
log.debug('New OVSDB client', {
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.debug(`[${this._host.name_label}] New OVSDB client`)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
get address() {
|
||||
return this._host.address
|
||||
}
|
||||
|
||||
get host() {
|
||||
return this._host.$ref
|
||||
}
|
||||
|
||||
get id() {
|
||||
return this._host.$id
|
||||
}
|
||||
|
||||
updateCertificates(clientKey, clientCert, caCert) {
|
||||
this._clientKey = clientKey
|
||||
this._clientCert = clientCert
|
||||
this._caCert = caCert
|
||||
|
||||
log.debug('Certificates have been updated', {
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.debug(`[${this._host.name_label}] Certificates have been updated`)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async addInterfaceAndPort(
|
||||
network,
|
||||
networkUuid,
|
||||
networkName,
|
||||
remoteAddress,
|
||||
encapsulation,
|
||||
key,
|
||||
password,
|
||||
privateNetworkUuid
|
||||
encapsulation
|
||||
) {
|
||||
if (
|
||||
this._adding.find(
|
||||
elem => elem.id === network.uuid && elem.addr === remoteAddress
|
||||
) !== undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
const adding = { id: network.uuid, addr: remoteAddress }
|
||||
this._adding.push(adding)
|
||||
|
||||
const socket = await this._connect()
|
||||
const bridge = await this._getBridgeForNetwork(network, socket)
|
||||
if (bridge.uuid === undefined) {
|
||||
const index = this._numberOfPortAndInterface
|
||||
++this._numberOfPortAndInterface
|
||||
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
const alreadyExist = await this._interfaceAndPortAlreadyExist(
|
||||
bridge,
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (alreadyExist) {
|
||||
socket.destroy()
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
|
||||
)
|
||||
return bridge.name
|
||||
return
|
||||
}
|
||||
|
||||
const index = ++this._numberOfPortAndInterface
|
||||
const interfaceName = bridge.name + '_iface' + index
|
||||
const portName = bridge.name + '_port' + index
|
||||
const interfaceName = 'tunnel_iface' + index
|
||||
const portName = 'tunnel_port' + index
|
||||
|
||||
// Add interface and port to the bridge
|
||||
const options = { remote_ip: remoteAddress, key: key }
|
||||
if (password !== undefined) {
|
||||
options.psk = password
|
||||
}
|
||||
const options = ['map', [['remote_ip', remoteAddress]]]
|
||||
const addInterfaceOperation = {
|
||||
op: 'insert',
|
||||
table: 'Interface',
|
||||
row: {
|
||||
type: encapsulation,
|
||||
options: toMap(options),
|
||||
options: options,
|
||||
name: interfaceName,
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_iface',
|
||||
}
|
||||
|
||||
const addPortOperation = {
|
||||
op: 'insert',
|
||||
table: 'Port',
|
||||
row: {
|
||||
name: portName,
|
||||
interfaces: ['set', [['named-uuid', 'new_iface']]],
|
||||
other_config: toMap({
|
||||
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
|
||||
}),
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_port',
|
||||
}
|
||||
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
|
||||
}
|
||||
const params = [
|
||||
@@ -152,11 +115,7 @@ export class OvsdbClient {
|
||||
mutateBridgeOperation,
|
||||
]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
|
||||
this._adding = this._adding.filter(
|
||||
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
|
||||
)
|
||||
if (jsonObjects === undefined) {
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
@@ -167,49 +126,42 @@ export class OvsdbClient {
|
||||
let opResult
|
||||
do {
|
||||
opResult = jsonObjects[0].result[i]
|
||||
if (opResult?.error !== undefined) {
|
||||
if (opResult != null && opResult.error != null) {
|
||||
error = opResult.error
|
||||
details = opResult.details
|
||||
}
|
||||
++i
|
||||
} while (opResult !== undefined && error === undefined)
|
||||
} while (opResult && !error)
|
||||
|
||||
if (error !== undefined) {
|
||||
log.error('Error while adding port and interface to bridge', {
|
||||
error,
|
||||
details,
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridge.name,
|
||||
network: network.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
if (error != null) {
|
||||
log.error(
|
||||
`[${this._host.name_label}] Error while adding port: '${portName}' and interface: '${interfaceName}' to bridge: '${bridgeName}' on network: '${networkName}' because: ${error}: ${details}`
|
||||
)
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Port and interface added to bridge', {
|
||||
port: portName,
|
||||
interface: interfaceName,
|
||||
bridge: bridge.name,
|
||||
network: network.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Port: '${portName}' and interface: '${interfaceName}' added to bridge: '${bridgeName}' on network: '${networkName}'`
|
||||
)
|
||||
socket.destroy()
|
||||
return bridge.name
|
||||
}
|
||||
|
||||
async resetForNetwork(network, privateNetworkUuid) {
|
||||
async resetForNetwork(networkUuid, networkName) {
|
||||
const socket = await this._connect()
|
||||
const bridge = await this._getBridgeForNetwork(network, socket)
|
||||
if (bridge.uuid === undefined) {
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
// Delete old ports created by a SDN controller
|
||||
const ports = await this._getBridgePorts(bridge, socket)
|
||||
if (ports === undefined) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
@@ -224,24 +176,15 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
if (selectResult == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
forOwn(selectResult.other_config[1], config => {
|
||||
// 2019-09-03
|
||||
// Compatibility code, to be removed in 1 year.
|
||||
const oldShouldDelete =
|
||||
config[0] === 'private_pool_wide' ||
|
||||
config[0] === 'cross_pool' ||
|
||||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
|
||||
config[0] === 'xo:sdn-controller:cross-pool'
|
||||
|
||||
const shouldDelete =
|
||||
config[0] === 'xo:sdn-controller:private-network-uuid' &&
|
||||
config[1] === privateNetworkUuid
|
||||
|
||||
if (shouldDelete || oldShouldDelete) {
|
||||
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Adding port: '${selectResult.name}' to delete list from bridge: '${bridgeName}'`
|
||||
)
|
||||
portsToDelete.push(['uuid', portUuid])
|
||||
}
|
||||
})
|
||||
@@ -256,31 +199,27 @@ export class OvsdbClient {
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'delete', ['set', portsToDelete]]],
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', mutateBridgeOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects === undefined) {
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
if (jsonObjects[0].error != null) {
|
||||
log.error('Error while deleting ports from bridge', {
|
||||
error: jsonObjects[0].error,
|
||||
bridge: bridge.name,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.error(
|
||||
`[${this._host.name_label}] Couldn't delete ports from bridge: '${bridgeName}' because: ${jsonObjects.error}`
|
||||
)
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug('Ports deleted from bridge', {
|
||||
nPorts: jsonObjects[0].result[0].count,
|
||||
bridge: bridge.name,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Deleted ${jsonObjects[0].result[0].count} ports from bridge: '${bridgeName}'`
|
||||
)
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
@@ -296,9 +235,9 @@ export class OvsdbClient {
|
||||
for (let i = pos; i < data.length; ++i) {
|
||||
const c = data.charAt(i)
|
||||
if (c === '{') {
|
||||
++depth
|
||||
depth++
|
||||
} else if (c === '}') {
|
||||
--depth
|
||||
depth--
|
||||
if (depth === 0) {
|
||||
const object = JSON.parse(buffer + data.substr(0, i + 1))
|
||||
objects.push(object)
|
||||
@@ -316,9 +255,13 @@ export class OvsdbClient {
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _getBridgeForNetwork(network, socket) {
|
||||
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
|
||||
const where = [
|
||||
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
|
||||
[
|
||||
'external_ids',
|
||||
'includes',
|
||||
['map', [['xs-network-uuids', networkUuid]]],
|
||||
],
|
||||
]
|
||||
const selectResult = await this._select(
|
||||
'Bridge',
|
||||
@@ -326,27 +269,34 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
log.error('No bridge found for network', {
|
||||
network: network.name_label,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return {}
|
||||
if (selectResult == null) {
|
||||
return [null, null]
|
||||
}
|
||||
|
||||
return { uuid: selectResult._uuid[1], name: selectResult.name }
|
||||
const bridgeUuid = selectResult._uuid[1]
|
||||
const bridgeName = selectResult.name
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Found bridge: '${bridgeName}' for network: '${networkName}'`
|
||||
)
|
||||
|
||||
return [bridgeUuid, bridgeName]
|
||||
}
|
||||
|
||||
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
|
||||
const ports = await this._getBridgePorts(bridge, socket)
|
||||
if (ports === undefined) {
|
||||
return false
|
||||
async _interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
return
|
||||
}
|
||||
|
||||
for (const port of ports) {
|
||||
const portUuid = port[1]
|
||||
const interfaces = await this._getPortInterfaces(portUuid, socket)
|
||||
if (interfaces === undefined) {
|
||||
if (interfaces == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -357,7 +307,7 @@ export class OvsdbClient {
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (hasRemote) {
|
||||
if (hasRemote === true) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -366,11 +316,11 @@ export class OvsdbClient {
|
||||
return false
|
||||
}
|
||||
|
||||
async _getBridgePorts(bridge, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
|
||||
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
|
||||
const selectResult = await this._select('Bridge', ['ports'], where, socket)
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.ports[0] === 'set'
|
||||
@@ -386,8 +336,8 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
return
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.interfaces[0] === 'set'
|
||||
@@ -403,7 +353,7 @@ export class OvsdbClient {
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult === undefined) {
|
||||
if (selectResult == null) {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -428,36 +378,28 @@ export class OvsdbClient {
|
||||
|
||||
const params = ['Open_vSwitch', selectOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects === undefined) {
|
||||
if (jsonObjects == null) {
|
||||
return
|
||||
}
|
||||
const jsonResult = jsonObjects[0].result[0]
|
||||
if (jsonResult.error !== undefined) {
|
||||
log.error('Error while selecting columns', {
|
||||
error: jsonResult.error,
|
||||
details: jsonResult.details,
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
if (jsonResult.error != null) {
|
||||
log.error(
|
||||
`[${this._host.name_label}] Couldn't retrieve: '${columns}' in: '${table}' because: ${jsonResult.error}: ${jsonResult.details}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
if (jsonResult.rows.length === 0) {
|
||||
log.error('No result for select', {
|
||||
columns,
|
||||
table,
|
||||
where,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
log.error(
|
||||
`[${this._host.name_label}] No '${columns}' found in: '${table}' where: '${where}'`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// For now all select operations should return only 1 row
|
||||
assert(
|
||||
jsonResult.rows.length === 1,
|
||||
`[${this.host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
`[${this._host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
)
|
||||
|
||||
return jsonResult.rows[0]
|
||||
@@ -465,7 +407,9 @@ export class OvsdbClient {
|
||||
|
||||
async _sendOvsdbTransaction(params, socket) {
|
||||
const stream = socket
|
||||
const requestId = ++this._requestId
|
||||
|
||||
const requestId = this._requestID
|
||||
++this._requestID
|
||||
const req = {
|
||||
id: requestId,
|
||||
method: 'transact',
|
||||
@@ -475,11 +419,10 @@ export class OvsdbClient {
|
||||
try {
|
||||
stream.write(JSON.stringify(req))
|
||||
} catch (error) {
|
||||
log.error('Error while writing into stream', {
|
||||
error,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
log.error(
|
||||
`[${this._host.name_label}] Error while writing into stream: ${error}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
let result
|
||||
@@ -489,11 +432,10 @@ export class OvsdbClient {
|
||||
try {
|
||||
result = await fromEvent(stream, 'data', {})
|
||||
} catch (error) {
|
||||
log.error('Error while waiting for stream data', {
|
||||
error,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
return
|
||||
log.error(
|
||||
`[${this._host.name_label}] Error while waiting for stream data: ${error}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
jsonObjects = this._parseJson(result)
|
||||
@@ -510,7 +452,7 @@ export class OvsdbClient {
|
||||
ca: this._caCert,
|
||||
key: this._clientKey,
|
||||
cert: this._clientCert,
|
||||
host: this.host.address,
|
||||
host: this._host.address,
|
||||
port: OVSDB_PORT,
|
||||
rejectUnauthorized: false,
|
||||
requestCert: false,
|
||||
@@ -520,20 +462,18 @@ export class OvsdbClient {
|
||||
try {
|
||||
await fromEvent(socket, 'secureConnect', {})
|
||||
} catch (error) {
|
||||
log.error('TLS connection failed', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.error(
|
||||
`[${this._host.name_label}] TLS connection failed because: ${error}: ${error.code}`
|
||||
)
|
||||
throw error
|
||||
}
|
||||
|
||||
log.debug(`[${this._host.name_label}] TLS connection successful`)
|
||||
|
||||
socket.on('error', error => {
|
||||
log.error('Socket error', {
|
||||
error,
|
||||
code: error.code,
|
||||
host: this.host.name_label,
|
||||
})
|
||||
log.error(
|
||||
`[${this._host.name_label}] OVSDB client socket error: ${error} with code: ${error.code}`
|
||||
)
|
||||
})
|
||||
|
||||
return socket
|
||||
@@ -1,202 +0,0 @@
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import { filter, find, forOwn, map, sample } from 'lodash'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller:private-network')
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
|
||||
const createPassword = () =>
|
||||
Array.from({ length: 16 }, _ => sample(CHARS)).join('')
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export class PrivateNetwork {
|
||||
constructor(controller, uuid) {
|
||||
this.controller = controller
|
||||
this.uuid = uuid
|
||||
this.networks = {}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async addHost(host) {
|
||||
if (host.$ref === this.center?.$ref) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const hostClient = this.controller.ovsdbClients[host.$ref]
|
||||
if (hostClient === undefined) {
|
||||
log.error('No OVSDB client found', {
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const centerClient = this.controller.ovsdbClients[this.center.$ref]
|
||||
if (centerClient === undefined) {
|
||||
log.error('No OVSDB client found for star-center', {
|
||||
privateNetwork: this.uuid,
|
||||
host: this.center.name_label,
|
||||
pool: this.center.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
const network = this.networks[host.$pool.uuid]
|
||||
const centerNetwork = this.networks[this.center.$pool.uuid]
|
||||
const otherConfig = network.other_config
|
||||
const encapsulation =
|
||||
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
|
||||
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
|
||||
const password =
|
||||
otherConfig['xo:sdn-controller:encrypted'] === 'true'
|
||||
? createPassword()
|
||||
: undefined
|
||||
|
||||
let bridgeName
|
||||
try {
|
||||
;[bridgeName] = await Promise.all([
|
||||
hostClient.addInterfaceAndPort(
|
||||
network,
|
||||
centerClient.host.address,
|
||||
encapsulation,
|
||||
vni,
|
||||
password,
|
||||
this.uuid
|
||||
),
|
||||
centerClient.addInterfaceAndPort(
|
||||
centerNetwork,
|
||||
hostClient.host.address,
|
||||
encapsulation,
|
||||
vni,
|
||||
password,
|
||||
this.uuid
|
||||
),
|
||||
])
|
||||
} catch (error) {
|
||||
log.error('Error while connecting host to private network', {
|
||||
error,
|
||||
privateNetwork: this.uuid,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
return
|
||||
}
|
||||
log.info('Host added', {
|
||||
privateNetwork: this.uuid,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: host.$pool.name_label,
|
||||
})
|
||||
|
||||
return bridgeName
|
||||
}
|
||||
|
||||
addNetwork(network) {
|
||||
this.networks[network.$pool.uuid] = network
|
||||
log.info('Adding network', {
|
||||
privateNetwork: this.uuid,
|
||||
network: network.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
if (this.center === undefined) {
|
||||
return this.electNewCenter()
|
||||
}
|
||||
|
||||
const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
|
||||
return Promise.all(
|
||||
map(hosts, async host => {
|
||||
const hostClient = this.controller.ovsdbClients[host.$ref]
|
||||
const network = this.networks[host.$pool.uuid]
|
||||
await hostClient.resetForNetwork(network, this.uuid)
|
||||
await this.addHost(host)
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async electNewCenter() {
|
||||
delete this.center
|
||||
|
||||
// TODO: make it random
|
||||
const hosts = this._getHosts()
|
||||
for (const host of hosts) {
|
||||
const pif = find(host.$PIFs, {
|
||||
network: this.networks[host.$pool.uuid].$ref,
|
||||
})
|
||||
if (pif?.currently_attached && host.$metrics.live) {
|
||||
this.center = host
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (this.center === undefined) {
|
||||
log.error('No available host to elect new star-center', {
|
||||
privateNetwork: this.uuid,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
await this._reset()
|
||||
|
||||
// Recreate star topology
|
||||
await Promise.all(map(hosts, host => this.addHost(host)))
|
||||
|
||||
log.info('New star-center elected', {
|
||||
center: this.center.name_label,
|
||||
privateNetwork: this.uuid,
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
getPools() {
|
||||
const pools = []
|
||||
forOwn(this.networks, network => {
|
||||
pools.push(network.$pool)
|
||||
})
|
||||
return pools
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_reset() {
|
||||
return Promise.all(
|
||||
map(this._getHosts(), async host => {
|
||||
// Clean old ports and interfaces
|
||||
const hostClient = this.controller.ovsdbClients[host.$ref]
|
||||
if (hostClient === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
const network = this.networks[host.$pool.uuid]
|
||||
try {
|
||||
await hostClient.resetForNetwork(network, this.uuid)
|
||||
} catch (error) {
|
||||
log.error('Error while resetting private network', {
|
||||
error,
|
||||
privateNetwork: this.uuid,
|
||||
network: network.name_label,
|
||||
host: host.name_label,
|
||||
pool: network.$pool.name_label,
|
||||
})
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_getHosts() {
|
||||
const hosts = []
|
||||
forOwn(this.networks, network => {
|
||||
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
|
||||
})
|
||||
return hosts
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
const pkg = require('./package.json')

// `xo-server-test` is a special package which has no dev dependencies but our
// babel config generator only looks in `devDependencies`.
require('assert').strictEqual(pkg.devDependencies, undefined)
pkg.devDependencies = pkg.dependencies

module.exports = require('../../@xen-orchestra/babel-config')(pkg)
|
||||
@@ -1,24 +0,0 @@
|
||||
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/
|
||||
@@ -1,160 +0,0 @@
|
||||
# xo-server-test

> Test client for Xo-Server

Tests are run sequentially to avoid concurrency issues.

## Adding a test

### Organization

```
src
├─ user
| ├─ __snapshots__
| | └─ index.spec.js.snap
| └─ index.spec.js
├─ job
| └─ index.spec.js
├─ issues
¦ └─ index.spec.js
¦
├─ _xoConnection.js
└─ util.js
```

The tests can describe:

- XO methods or scenarios:

  `src/user/index.spec.js`

  ```js
  import xo from "../_xoConnection";

  describe("user", () => {

    // testing a method
    describe(".set()", () => {
      it("sets an email", async () => {
        // some tests using xo methods and helpers from _xoConnection.js
        const id = await xo.createTempUser(SIMPLE_USER);
        expect(await xo.call("user.set", params)).toBe(true);
        expect(await xo.getUser(id)).toMatchSnapshot({
          id: expect.any(String),
        });
      });
    });

    // testing a scenario
    test("create two users, modify a user email to be the same with the other and fail trying to connect them", () => {
      /* some tests */
    });

  });
  ```

- issues

  `src/issues/index.spec.js`

  ```js
  describe("issue", () => {
    test("5454", () => {
      /* some tests */
    })
  })
  ```

### Best practices

- The test environment must remain the same before and after each test:
  * each resource created must be deleted
  * existing resources should not be altered

- Write the title of the test as a sentence; it must be clear and consistent.

- If the feature you want to test is not implemented yet: write the test and skip it, using `it.skip()`.

- Choose test values that cover as many cases as possible.

- If your tests keep track of large objects, it is better to use snapshots.

- `_xoConnection.js` contains helpers to create temporary resources and to interface with XO.
  You can use it if you need to create resources which will be automatically deleted after the test:

  ```javascript
  import xo from "../_xoConnection";

  describe(".create()", () => {
    it("creates a user without permission", async () => {
      // The user will be deleted automatically at the end of the test
      const userId = await xo.createTempUser({
        email: "wayne1@vates.fr",
        password: "batman1",
      });
      expect(await xo.getUser(userId)).toMatchSnapshot({
        id: expect.any(String),
      });
    });
  });
  ```

  The available helpers (a usage sketch follows this list):
  * `createTempUser(params)`
  * `getUser(id)`
  * `createTempJob(params)`
  * `createTempBackupNgJob(params)`
  * `createTempVm(params)`
  * `getSchedule(predicate)`
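
  For illustration only, here is a minimal sketch of a spec built on these helpers; it assumes the `templates.default` entry of your `config.toml` points to a usable template, and the `describe`/`it` titles are invented for the example:

  ```js
  import config from "../_config";
  import xo from "../_xoConnection";

  describe("vm", () => {
    it("creates a VM from the default template", async () => {
      // `createTempVm` registers a disposer, so the VM is deleted
      // automatically at the end of the test.
      const vm = await xo.createTempVm({
        name_description: "temporary VM used as a documentation example",
        template: config.templates.default,
      });
      expect(vm.type).toBe("VM");
    });
  });
  ```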

## Usage

- Before running the tests, you have to create a config file for xo-server-test:

  ```
  > cp sample.config.toml ~/.config/xo-server-test/config.toml
  ```

  Then fill it in.

- To run the tests:

  ```
  > npm ci
  > yarn test
  ```

  Each test suite is reported as passed (`PASS`) or failed (`FAIL`):

  ```
  > yarn test
  yarn run v1.9.4
  $ jest
  PASS src/user/user.spec.js
  PASS src/job/job.spec.js
  PASS src/backupNg/backupNg.spec.js

  Test Suites: 3 passed, 3 total
  Tests: 2 skipped, 36 passed, 38 total
  Snapshots: 35 passed, 35 total
  Time: 7.257s, estimated 8s
  Ran all test suites.
  Done in 7.92s.
  ```

- You can run only the tests related to changed files, and review the failed output, by using: `> yarn test --watch`

- ⚠ Warning: snapshots ⚠
  After each run of the tests, check that snapshots have not been inadvertently modified.

- ⚠ Jest known issue ⚠
  If a test timeout is triggered, the next async tests can fail because of inadvertently modified snapshots.
  As a workaround, you can clean your git working tree and re-run jest with a large timeout: `> yarn test --testTimeout=100000`

## Contributions

Contributions are *very* welcome, either to the documentation or to
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

ISC © [Vates SAS](http://vates.fr)
|
||||
@@ -1,56 +0,0 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server-test",
|
||||
"version": "0.0.0",
|
||||
"license": "ISC",
|
||||
"description": "Test client for Xo-Server",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/xo-server-test",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Julien Fontanet",
|
||||
"email": "julien.fontanet@isonoe.net"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
"@babel/core": "^7.1.6",
|
||||
"@babel/plugin-proposal-decorators": "^7.4.0",
|
||||
"@babel/preset-env": "^7.1.6",
|
||||
"@iarna/toml": "^2.2.1",
|
||||
"app-conf": "^0.7.0",
|
||||
"babel-plugin-lodash": "^3.2.11",
|
||||
"golike-defer": "^0.4.1",
|
||||
"jest": "^24.8.0",
|
||||
"lodash": "^4.17.11",
|
||||
"promise-toolbox": "^0.14.0",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"scripts": {
|
||||
"dev-test": "jest --bail --watch",
|
||||
"test": "jest"
|
||||
},
|
||||
"jest": {
|
||||
"modulePathIgnorePatterns": [
|
||||
"<rootDir>/src/old-tests"
|
||||
],
|
||||
"testEnvironment": "node",
|
||||
"testRegex": "\\.spec\\.js$",
|
||||
"maxConcurrency": 1
|
||||
}
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
[xoConnection]
|
||||
url = ''
|
||||
email = ''
|
||||
password = ''
|
||||
|
||||
[pools]
|
||||
default = ''
|
||||
|
||||
[servers]
|
||||
[servers.default]
|
||||
username = ''
|
||||
password = ''
|
||||
host = ''
|
||||
|
||||
[vms]
|
||||
default = ''
|
||||
withOsAndXenTools = ''
|
||||
# vmToBackup = ''
|
||||
|
||||
[templates]
|
||||
default = ''
|
||||
templateWithoutDisks = ''
|
||||
|
||||
[srs]
|
||||
default = ''
|
||||
|
||||
[remotes]
|
||||
default = { name = '', url = '' }
|
||||
remote1 = { name = '', url = '' }
|
||||
# remote2 = { name = '', url = '' }
|
||||
@@ -1,13 +0,0 @@
|
||||
import appConf from 'app-conf'
import path from 'path'

/* eslint-env jest */

let config
export { config as default }

beforeAll(async () => {
  config = await appConf.load('xo-server-test', {
    appDir: path.join(__dirname, '..'),
  })
})
|
||||
@@ -1,6 +0,0 @@
|
||||
export const getDefaultName = () => `xo-server-test ${new Date().toISOString()}`

export const getDefaultSchedule = () => ({
  name: getDefaultName(),
  cron: '0 * * * * *',
})
|
||||
@@ -1,6 +0,0 @@
|
||||
const randomId = () =>
  Math.random()
    .toString(36)
    .slice(2)

export { randomId as default }
|
||||
@@ -1,294 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
import defer from 'golike-defer'
|
||||
import Xo from 'xo-lib'
|
||||
import XoCollection from 'xo-collection'
|
||||
import { defaultsDeep, find, forOwn, pick } from 'lodash'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import config from './_config'
|
||||
import { getDefaultName } from './_defaultValues'
|
||||
|
||||
class XoConnection extends Xo {
|
||||
constructor(opts) {
|
||||
super(opts)
|
||||
|
||||
const objects = (this._objects = new XoCollection())
|
||||
const watchers = (this._watchers = {})
|
||||
this._tempResourceDisposers = []
|
||||
this._durableResourceDisposers = []
|
||||
|
||||
this.on('notification', ({ method, params }) => {
|
||||
if (method !== 'all') {
|
||||
return
|
||||
}
|
||||
|
||||
const fn = params.type === 'exit' ? objects.unset : objects.set
|
||||
forOwn(params.items, (item, id) => {
|
||||
fn.call(objects, id, item)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(item)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
get objects() {
|
||||
return this._objects
|
||||
}
|
||||
|
||||
async _fetchObjects() {
|
||||
const { _objects: objects, _watchers: watchers } = this
|
||||
forOwn(await this.call('xo.getAllObjects'), (object, id) => {
|
||||
objects.set(id, object)
|
||||
|
||||
const watcher = watchers[id]
|
||||
if (watcher !== undefined) {
|
||||
watcher(object)
|
||||
delete watchers[id]
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: integrate in xo-lib.
|
||||
waitObject(id) {
|
||||
return new Promise(resolve => {
|
||||
this._watchers[id] = resolve
|
||||
}) // FIXME: work with multiple listeners.
|
||||
}
|
||||
|
||||
async getOrWaitObject(id) {
|
||||
const object = this._objects.all[id]
|
||||
if (object !== undefined) {
|
||||
return object
|
||||
}
|
||||
return this.waitObject(id)
|
||||
}
|
||||
|
||||
@defer
|
||||
async connect(
|
||||
$defer,
|
||||
credentials = pick(config.xoConnection, 'email', 'password')
|
||||
) {
|
||||
await this.open()
|
||||
$defer.onFailure(() => this.close())
|
||||
|
||||
await this.signIn(credentials)
|
||||
await this._fetchObjects()
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
async waitObjectState(id, predicate) {
|
||||
let obj = this._objects.all[id]
|
||||
while (true) {
|
||||
try {
|
||||
await predicate(obj)
|
||||
return obj
|
||||
} catch (_) {}
|
||||
// If failed, wait for next object state/update and retry.
|
||||
obj = await this.waitObject(id)
|
||||
}
|
||||
}
|
||||
|
||||
async createTempUser(params) {
|
||||
const id = await this.call('user.create', params)
|
||||
this._tempResourceDisposers.push('user.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async getUser(id) {
|
||||
return find(await super.call('user.getAll'), { id })
|
||||
}
|
||||
|
||||
async createTempJob(params) {
|
||||
const id = await this.call('job.create', { job: params })
|
||||
this._tempResourceDisposers.push('job.delete', { id })
|
||||
return id
|
||||
}
|
||||
|
||||
async createTempBackupNgJob(params) {
|
||||
// mutate and inject default values
|
||||
defaultsDeep(params, {
|
||||
mode: 'full',
|
||||
name: getDefaultName(),
|
||||
settings: {
|
||||
'': {
|
||||
// it must be enabled because the XAPI might not be able to coalesce VDIs
|
||||
// as fast as the tests run
|
||||
//
|
||||
// see https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
|
||||
bypassVdiChainsCheck: true,
|
||||
|
||||
// it must be 'never' to avoid race conditions with the plugin `backup-reports`
|
||||
reportWhen: 'never',
|
||||
},
|
||||
},
|
||||
})
|
||||
const id = await this.call('backupNg.createJob', params)
|
||||
this._tempResourceDisposers.push('backupNg.deleteJob', { id })
|
||||
return this.call('backupNg.getJob', { id })
|
||||
}
|
||||
|
||||
async createTempNetwork(params) {
|
||||
const id = await this.call('network.create', {
|
||||
name: 'XO Test',
|
||||
pool: config.pools.default,
|
||||
...params,
|
||||
})
|
||||
this._tempResourceDisposers.push('network.delete', { id })
|
||||
return this.getOrWaitObject(id)
|
||||
}
|
||||
|
||||
async createTempVm(params) {
|
||||
const id = await this.call('vm.create', {
|
||||
name_label: getDefaultName(),
|
||||
template: config.templates.templateWithoutDisks,
|
||||
...params,
|
||||
})
|
||||
this._tempResourceDisposers.push('vm.delete', { id })
|
||||
return this.waitObjectState(id, vm => {
|
||||
if (vm.type !== 'VM') throw new Error('retry')
|
||||
})
|
||||
}
|
||||
|
||||
async startTempVm(id, params, withXenTools = false) {
|
||||
await this.call('vm.start', { id, ...params })
|
||||
this._tempResourceDisposers.push('vm.stop', { id, force: true })
|
||||
return this.waitObjectState(id, vm => {
|
||||
if (
|
||||
vm.power_state !== 'Running' ||
|
||||
(withXenTools && vm.xenTools === false)
|
||||
) {
|
||||
throw new Error('retry')
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async createTempRemote(params) {
|
||||
const remote = await this.call('remote.create', params)
|
||||
this._tempResourceDisposers.push('remote.delete', { id: remote.id })
|
||||
return remote
|
||||
}
|
||||
|
||||
async createTempServer(params) {
|
||||
const servers = await this.call('server.getAll')
|
||||
const server = servers.find(server => server.host === params.host)
|
||||
if (server !== undefined) {
|
||||
if (server.status === 'disconnected') {
|
||||
await this.call('server.enable', { id: server.id })
|
||||
this._durableResourceDisposers.push('server.disable', { id: server.id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const id = await this.call('server.add', {
|
||||
...params,
|
||||
allowUnauthorized: true,
|
||||
autoConnect: false,
|
||||
})
|
||||
this._durableResourceDisposers.push('server.remove', { id })
|
||||
await this.call('server.enable', { id })
|
||||
await fromEvent(this._objects, 'finish')
|
||||
}
|
||||
|
||||
async getSchedule(predicate) {
|
||||
return find(await this.call('schedule.getAll'), predicate)
|
||||
}
|
||||
|
||||
async runBackupJob(jobId, scheduleId, { remotes, nExecutions = 1 }) {
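// Run the job `nExecutions` times, register disposers so that the backups and
// the VMs created by the job are deleted afterwards, and return the backup ids
// grouped by remote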
|
||||
for (let i = 0; i < nExecutions; i++) {
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: scheduleId })
|
||||
}
|
||||
const backups = {}
|
||||
if (remotes !== undefined) {
|
||||
const backupsByRemote = await xo.call('backupNg.listVmBackups', {
|
||||
remotes,
|
||||
})
|
||||
forOwn(backupsByRemote, (backupsByVm, remoteId) => {
|
||||
backups[remoteId] = []
|
||||
forOwn(backupsByVm, vmBackups => {
|
||||
vmBackups.forEach(
|
||||
({ jobId: backupJobId, scheduleId: backupScheduleId, id }) => {
|
||||
if (jobId === backupJobId && scheduleId === backupScheduleId) {
|
||||
this._tempResourceDisposers.push('backupNg.deleteVmBackup', {
|
||||
id,
|
||||
})
|
||||
backups[remoteId].push(id)
|
||||
}
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
forOwn(this.objects.all, (obj, id) => {
|
||||
if (
|
||||
obj.other !== undefined &&
|
||||
obj.other['xo:backup:job'] === jobId &&
|
||||
obj.other['xo:backup:schedule'] === scheduleId
|
||||
) {
|
||||
this._tempResourceDisposers.push('vm.delete', {
|
||||
id,
|
||||
})
|
||||
}
|
||||
})
|
||||
return backups
|
||||
}
|
||||
|
||||
getBackupLogs(filter) {
|
||||
return this.call('backupNg.getLogs', { _forceRefresh: true, ...filter })
|
||||
}
|
||||
|
||||
async _cleanDisposers(disposers) {
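// `disposers` is a flat list of alternating method/params entries pushed in
// creation order; it is walked backwards so that resources are cleaned up in
// reverse order of creation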
|
||||
for (let n = disposers.length - 1; n > 0; ) {
|
||||
const params = disposers[n--]
|
||||
const method = disposers[n--]
|
||||
await this.call(method, params).catch(error => {
|
||||
console.warn('deleteTempResources', method, params, error)
|
||||
})
|
||||
}
|
||||
disposers.length = 0
|
||||
}
|
||||
|
||||
async deleteTempResources() {
|
||||
await this._cleanDisposers(this._tempResourceDisposers)
|
||||
}
|
||||
|
||||
async deleteDurableResources() {
|
||||
await this._cleanDisposers(this._durableResourceDisposers)
|
||||
}
|
||||
}
|
||||
|
||||
const getConnection = credentials => {
|
||||
const xo = new XoConnection({ url: config.xoConnection.url })
|
||||
return xo.connect(credentials)
|
||||
}
|
||||
|
||||
let xo
|
||||
beforeAll(async () => {
|
||||
// TOFIX: stop tests if the connection is not established properly and show the error
|
||||
xo = await getConnection()
|
||||
})
|
||||
afterAll(async () => {
|
||||
await xo.deleteDurableResources()
|
||||
await xo.close()
|
||||
xo = null
|
||||
})
|
||||
afterEach(() => xo.deleteTempResources())
|
||||
|
||||
export { xo as default }
|
||||
|
||||
export const testConnection = ({ credentials }) =>
|
||||
getConnection(credentials).then(connection => connection.close())
|
||||
|
||||
export const testWithOtherConnection = defer(
|
||||
async ($defer, credentials, functionToExecute) => {
|
||||
const xoUser = await getConnection(credentials)
|
||||
$defer(() => xoUser.close())
|
||||
await functionToExecute(xoUser)
|
||||
}
|
||||
)
|
||||
@@ -1,464 +0,0 @@
|
||||
// Jest Snapshot v1, https://goo.gl/fbAQLP
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "no disks found",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "skipped",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with no matching VMs 1`] = `[JsonRpcError: unknown error from the peer]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job with non-existent vm 1`] = `
|
||||
Array [
|
||||
Object {
|
||||
"data": Object {
|
||||
"vms": Array [
|
||||
"non-existent-id",
|
||||
],
|
||||
},
|
||||
"message": "missingVms",
|
||||
},
|
||||
]
|
||||
`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
|
||||
|
||||
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"message": "copy, export and snapshot retentions cannot both be 0",
|
||||
"name": "Error",
|
||||
"stack": Any<String>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "failure",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
|
||||
Object {
|
||||
"data": Any<Object>,
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
|
||||
Object {
|
||||
"data": Any<Object>,
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 11`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 12`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": false,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"isFull": true,
|
||||
"type": "remote",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 23`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 24`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"result": Object {
|
||||
"size": Any<Number>,
|
||||
},
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
|
||||
Object {
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": "snapshot",
|
||||
"result": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
|
||||
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
|
||||
Object {
|
||||
"data": Object {
|
||||
"id": Any<String>,
|
||||
"type": "VM",
|
||||
},
|
||||
"end": Any<Number>,
|
||||
"id": Any<String>,
|
||||
"message": Any<String>,
|
||||
"start": Any<Number>,
|
||||
"status": "success",
|
||||
}
|
||||
`;
|
||||
@@ -1,693 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { forOwn } from 'lodash'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import config from '../_config'
|
||||
import randomId from '../_randomId'
|
||||
import xo from '../_xoConnection'
|
||||
import { getDefaultName, getDefaultSchedule } from '../_defaultValues'
|
||||
|
||||
const validateBackupJob = (jobInput, jobOutput, createdSchedule) => {
|
||||
const expectedObj = {
|
||||
id: expect.any(String),
|
||||
mode: jobInput.mode,
|
||||
name: jobInput.name,
|
||||
type: 'backup',
|
||||
settings: {
|
||||
'': jobInput.settings[''],
|
||||
},
|
||||
userId: xo._user.id,
|
||||
vms: jobInput.vms,
|
||||
}
|
||||
|
||||
const schedules = jobInput.schedules
|
||||
if (schedules !== undefined) {
|
||||
const scheduleTmpId = Object.keys(schedules)[0]
|
||||
expect(createdSchedule).toEqual({
|
||||
...schedules[scheduleTmpId],
|
||||
enabled: false,
|
||||
id: expect.any(String),
|
||||
jobId: jobOutput.id,
|
||||
})
|
||||
|
||||
expectedObj.settings[createdSchedule.id] = jobInput.settings[scheduleTmpId]
|
||||
}
|
||||
|
||||
expect(jobOutput).toEqual(expectedObj)
|
||||
}
|
||||
|
||||
const validateRootTask = (log, expected) =>
|
||||
expect(log).toEqual({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: 'backup',
|
||||
start: expect.any(Number),
|
||||
...expected,
|
||||
})
|
||||
|
||||
const validateVmTask = (task, vmId, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
expect(task.data.id).toBe(vmId)
|
||||
}
|
||||
|
||||
const validateSnapshotTask = (task, props) =>
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
result: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
|
||||
const validateExportTask = (task, srOrRemoteIds, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
expect(srOrRemoteIds).toContain(task.data.id)
|
||||
}
|
||||
|
||||
const validateOperationTask = (task, props) => {
|
||||
expect(task).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
...props,
|
||||
})
|
||||
}
|
||||
|
||||
describe('backupNg', () => {
|
||||
describe('.createJob() :', () => {
|
||||
it('creates a new backup job without schedules', async () => {
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(jobInput, jobOutput)
|
||||
})
|
||||
|
||||
it('creates a new backup job with schedules', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const jobInput = {
|
||||
mode: 'full',
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const jobOutput = await xo.createTempBackupNgJob(jobInput)
|
||||
validateBackupJob(
|
||||
jobInput,
|
||||
jobOutput,
|
||||
await xo.getSchedule({ jobId: jobOutput.id })
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes a backup job', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const jobId = await xo.call('backupNg.createJob', {
|
||||
mode: 'full',
|
||||
name: getDefaultName(),
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.deleteJob', { id: jobId })
|
||||
|
||||
let isRejectedJobErrorValid = false
|
||||
await xo.call('backupNg.getJob', { id: jobId }).catch(error => {
|
||||
isRejectedJobErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedJobErrorValid).toBe(true)
|
||||
|
||||
let isRejectedScheduleErrorValid = false
|
||||
await xo.call('schedule.get', { id: schedule.id }).catch(error => {
|
||||
isRejectedScheduleErrorValid = noSuchObject.is(error)
|
||||
})
|
||||
expect(isRejectedScheduleErrorValid).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runJob() :', () => {
|
||||
it('fails trying to run a backup job without schedule', async () => {
|
||||
const { id } = await xo.createTempBackupNgJob({
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
})
|
||||
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with no matching VMs', async () => {
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
name: 'test-vm-backupNg',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await expect(
|
||||
xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with non-existent vm', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
const { id: jobId } = await xo.createTempBackupNgJob({
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: 'non-existent-id',
|
||||
},
|
||||
})
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
const [log] = await xo.getBackupLogs({
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(log.warnings).toMatchSnapshot()
|
||||
})
|
||||
|
||||
it('fails trying to run a backup job with a VM without disks', async () => {
|
||||
jest.setTimeout(8e3)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: vmIdWithoutDisks } = await xo.createTempVm({
|
||||
name_description: 'Creating a vm without disks',
|
||||
template: config.templates.templateWithoutDisks,
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const jobInput = {
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 1 },
|
||||
},
|
||||
vms: {
|
||||
id: vmIdWithoutDisks,
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [vmTask],
|
||||
...log
|
||||
},
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'skipped',
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask.data.id).toBe(vmIdWithoutDisks)
|
||||
})
|
||||
|
||||
it('fails trying to run backup job without retentions', async () => {
|
||||
jest.setTimeout(7e3)
|
||||
const scheduleTempId = randomId()
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const jobInput = {
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: {},
|
||||
},
|
||||
srs: {
|
||||
id: config.srs.default,
|
||||
},
|
||||
vms: {
|
||||
id: config.vms.default,
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [task],
|
||||
...log
|
||||
},
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'failure',
|
||||
})
|
||||
|
||||
expect(task).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
result: {
|
||||
stack: expect.any(String),
|
||||
},
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(task.data.id).toBe(config.vms.default)
|
||||
})
|
||||
})
|
||||
|
||||
test('execute three times a rolling snapshot with 2 as retention & revert to an old state', async () => {
|
||||
jest.setTimeout(6e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
let vm = await xo.createTempVm({
|
||||
name_description: 'Creating a temporary vm',
|
||||
template: config.templates.default,
|
||||
VDIs: [
|
||||
{
|
||||
size: 1,
|
||||
SR: config.srs.default,
|
||||
type: 'user',
|
||||
},
|
||||
],
|
||||
})
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const jobInput = {
|
||||
vms: {
|
||||
id: vm.id,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
[scheduleTempId]: { snapshotRetention: 2 },
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
|
||||
vm = await xo.waitObjectState(vm.id, ({ snapshots }) => {
|
||||
// Test on updating snapshots.
|
||||
expect(snapshots).not.toEqual(vm.snapshots)
|
||||
})
|
||||
}
|
||||
|
||||
// Test on the retention, how many snapshots should be saved.
|
||||
expect(vm.snapshots.length).toBe(2)
|
||||
|
||||
const newVideoram = 16
|
||||
await xo.call('vm.set', { id: vm.id, videoram: newVideoram })
|
||||
await xo.waitObjectState(vm.id, ({ videoram }) => {
|
||||
expect(videoram).toBe(newVideoram.toString())
|
||||
})
|
||||
|
||||
await xo.call('vm.revert', {
|
||||
snapshot: vm.snapshots[0],
|
||||
})
|
||||
|
||||
await xo.waitObjectState(vm.id, ({ videoram }) => {
|
||||
expect(videoram).toBe(vm.videoram)
|
||||
})
|
||||
|
||||
const [
|
||||
{
|
||||
tasks: [{ tasks: subTasks, ...vmTask }],
|
||||
...log
|
||||
},
|
||||
] = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const subTaskSnapshot = subTasks.find(
|
||||
({ message }) => message === 'snapshot'
|
||||
)
|
||||
expect(subTaskSnapshot).toMatchSnapshot({
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
result: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
|
||||
expect(vmTask).toMatchSnapshot({
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
},
|
||||
end: expect.any(Number),
|
||||
id: expect.any(String),
|
||||
message: expect.any(String),
|
||||
start: expect.any(Number),
|
||||
})
|
||||
expect(vmTask.data.id).toBe(vm.id)
|
||||
})
|
||||
|
||||
test('execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval', async () => {
|
||||
jest.setTimeout(12e5)
|
||||
const {
|
||||
vms: { default: defaultVm, vmToBackup = defaultVm },
|
||||
remotes: { default: defaultRemote, remote1, remote2 = defaultRemote },
|
||||
servers: { default: defaultServer },
|
||||
} = config
|
||||
|
||||
expect(vmToBackup).not.toBe(undefined)
|
||||
expect(remote1).not.toBe(undefined)
|
||||
expect(remote2).not.toBe(undefined)
|
||||
|
||||
await xo.createTempServer(defaultServer)
|
||||
const { id: remoteId1 } = await xo.createTempRemote(remote1)
|
||||
const { id: remoteId2 } = await xo.createTempRemote(remote2)
|
||||
const remotes = [remoteId1, remoteId2]
|
||||
|
||||
const exportRetention = 2
|
||||
const fullInterval = 2
|
||||
const scheduleTempId = randomId()
|
||||
const jobInput = {
|
||||
mode: 'delta',
|
||||
remotes: {
|
||||
id: {
|
||||
__or: remotes,
|
||||
},
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
fullInterval,
|
||||
},
|
||||
[remoteId1]: { deleteFirst: true },
|
||||
[scheduleTempId]: { exportRetention },
|
||||
},
|
||||
vms: {
|
||||
id: vmToBackup,
|
||||
},
|
||||
}
|
||||
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId })
|
||||
expect(typeof schedule).toBe('object')
|
||||
|
||||
const nExecutions = 3
|
||||
const backupsByRemote = await xo.runBackupJob(jobId, schedule.id, {
|
||||
remotes,
|
||||
nExecutions,
|
||||
})
|
||||
forOwn(backupsByRemote, backups =>
|
||||
expect(backups.length).toBe(exportRetention)
|
||||
)
|
||||
|
||||
const backupLogs = await xo.getBackupLogs({
|
||||
jobId,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(backupLogs.length).toBe(nExecutions)
|
||||
|
||||
backupLogs.forEach(({ tasks = [], ...log }, key) => {
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: jobInput.mode,
|
||||
reportWhen: jobInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId,
|
||||
jobName: jobInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
const numberOfTasks = {
|
||||
export: 0,
|
||||
merge: 0,
|
||||
snapshot: 0,
|
||||
transfer: 0,
|
||||
vm: 0,
|
||||
}
|
||||
tasks.forEach(({ tasks = [], ...vmTask }) => {
|
||||
if (vmTask.data !== undefined && vmTask.data.type === 'VM') {
|
||||
validateVmTask(vmTask, vmToBackup, { status: 'success' })
|
||||
numberOfTasks.vm++
|
||||
tasks.forEach(({ tasks = [], ...subTask }) => {
|
||||
if (subTask.message === 'snapshot') {
|
||||
validateSnapshotTask(subTask, { status: 'success' })
|
||||
numberOfTasks.snapshot++
|
||||
}
|
||||
if (subTask.message === 'export') {
|
||||
validateExportTask(subTask, remotes, {
|
||||
data: {
|
||||
id: expect.any(String),
|
||||
isFull: key % fullInterval === 0,
|
||||
type: 'remote',
|
||||
},
|
||||
status: 'success',
|
||||
})
|
||||
numberOfTasks.export++
|
||||
let mergeTaskKey, transferTaskKey
|
||||
tasks.forEach((operationTask, key) => {
|
||||
if (
|
||||
operationTask.message === 'transfer' ||
|
||||
operationTask.message === 'merge'
|
||||
) {
|
||||
validateOperationTask(operationTask, {
|
||||
result: { size: expect.any(Number) },
|
||||
status: 'success',
|
||||
})
|
||||
if (operationTask.message === 'transfer') {
|
||||
mergeTaskKey = key
|
||||
numberOfTasks.merge++
|
||||
} else {
|
||||
transferTaskKey = key
|
||||
numberOfTasks.transfer++
|
||||
}
|
||||
}
|
||||
})
|
||||
expect(
|
||||
subTask.data.id === remoteId1
|
||||
? mergeTaskKey > transferTaskKey
|
||||
: mergeTaskKey < transferTaskKey
|
||||
).toBe(true)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
expect(numberOfTasks).toEqual({
|
||||
export: 2,
|
||||
merge: 2,
|
||||
snapshot: 1,
|
||||
transfer: 2,
|
||||
vm: 1,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
test('create and execute backup with enabled offline backup', async () => {
|
||||
const vm = xo.objects.all[config.vms.withOsAndXenTools]
|
||||
if (vm.power_state !== 'Running') {
|
||||
await xo.startTempVm(vm.id, { force: true }, true)
|
||||
}
|
||||
|
||||
const scheduleTempId = randomId()
|
||||
const srId = config.srs.default
|
||||
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
|
||||
const backupInput = {
|
||||
mode: 'full',
|
||||
remotes: {
|
||||
id: remoteId,
|
||||
},
|
||||
schedules: {
|
||||
[scheduleTempId]: getDefaultSchedule(),
|
||||
},
|
||||
settings: {
|
||||
'': {
|
||||
offlineBackup: true,
|
||||
},
|
||||
[scheduleTempId]: {
|
||||
copyRetention: 1,
|
||||
exportRetention: 1,
|
||||
},
|
||||
},
|
||||
srs: {
|
||||
id: srId,
|
||||
},
|
||||
vms: {
|
||||
id: vm.id,
|
||||
},
|
||||
}
|
||||
const backup = await xo.createTempBackupNgJob(backupInput)
|
||||
expect(backup.settings[''].offlineBackup).toBe(true)
|
||||
|
||||
const schedule = await xo.getSchedule({ jobId: backup.id })
|
||||
|
||||
await Promise.all([
|
||||
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
|
||||
xo.waitObjectState(vm.id, vm => {
|
||||
if (vm.power_state !== 'Halted') {
|
||||
throw new Error('retry')
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.waitObjectState(vm.id, vm => {
|
||||
if (vm.power_state !== 'Running') {
|
||||
throw new Error('retry')
|
||||
}
|
||||
})
|
||||
|
||||
const backupLogs = await xo.getBackupLogs({
|
||||
jobId: backup.id,
|
||||
scheduleId: schedule.id,
|
||||
})
|
||||
expect(backupLogs.length).toBe(1)
|
||||
|
||||
const { tasks, ...log } = backupLogs[0]
|
||||
validateRootTask(log, {
|
||||
data: {
|
||||
mode: backupInput.mode,
|
||||
reportWhen: backupInput.settings[''].reportWhen,
|
||||
},
|
||||
jobId: backup.id,
|
||||
jobName: backupInput.name,
|
||||
scheduleId: schedule.id,
|
||||
status: 'success',
|
||||
})
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(({ tasks, ...vmTask }) => {
|
||||
validateVmTask(vmTask, vm.id, { status: 'success' })
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(({ tasks, ...subTask }) => {
|
||||
expect(subTask.message).not.toBe('snapshot')
|
||||
|
||||
if (subTask.message === 'export') {
|
||||
validateExportTask(
|
||||
subTask,
|
||||
subTask.data.type === 'remote' ? remoteId : srId,
|
||||
{
|
||||
data: expect.any(Object),
|
||||
status: 'success',
|
||||
}
|
||||
)
|
||||
|
||||
expect(Array.isArray(tasks)).toBe(true)
|
||||
tasks.forEach(operationTask => {
|
||||
if (
|
||||
operationTask.message === 'transfer' ||
|
||||
operationTask.message === 'merge'
|
||||
) {
|
||||
validateOperationTask(operationTask, {
|
||||
result: { size: expect.any(Number) },
|
||||
status: 'success',
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
}, 200e3)
|
||||
})
|
||||
@@ -1,51 +0,0 @@
/* eslint-env jest */

import config from '../_config'
import xo from '../_xoConnection'

describe('issue', () => {
  test('4507', async () => {
    await xo.createTempServer(config.servers.default)

    const props = {
      coresPerSocket: 1,
      cpuCap: 1,
    }
    const vm = await xo.createTempVm(props)
    expect(vm).toMatchObject(props)

    await xo.call('vm.set', {
      coresPerSocket: null,
      cpuCap: null,
      id: vm.id,
    })
    await xo.waitObjectState(vm.id, vm => {
      expect(vm.coresPerSocket).toBe(undefined)
      expect(vm.cpuCap).toBe(undefined)
    })
  })

  test('4514', async () => {
    await xo.createTempServer(config.servers.default)

    const oldName = 'Old XO Test name'
    const { id, name_label } = await xo.createTempNetwork({ name: oldName })
    expect(name_label).toBe(oldName)

    const newName = 'New XO Test name'
    await xo.call('network.set', { id, name_label: newName })
    await xo.waitObjectState(id, ({ name_label }) => {
      expect(name_label).toBe(newName)
    })
  })

  test('4523', async () => {
    const id = await xo.call('network.create', {
      name: 'XO Test',
      pool: config.pools.default,
    })
    expect(typeof id).toBe('string')

    await xo.call('network.delete', { id })
  })
})
@@ -1,76 +0,0 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP

exports[`job .create() : creates a new job 1`] = `
Object {
  "id": Any<String>,
  "key": "snapshot",
  "method": "vm.snapshot",
  "name": "jobTest",
  "paramsVector": Any<Object>,
  "timeout": 2000,
  "type": "call",
  "userId": Any<String>,
}
`;

exports[`job .create() : fails trying to create a job without job params 1`] = `[JsonRpcError: invalid parameters]`;

exports[`job .delete() : deletes an existing job 1`] = `[JsonRpcError: no such job [object Object]]`;

exports[`job .delete() : deletes an existing job 2`] = `[JsonRpcError: no such schedule [object Object]]`;

exports[`job .get() : fails trying to get a job with a non existent id 1`] = `[JsonRpcError: no such job [object Object]]`;

exports[`job .get() : gets an existing job 1`] = `
Object {
  "id": Any<String>,
  "key": "snapshot",
  "method": "vm.snapshot",
  "name": "jobTest",
  "paramsVector": Any<Object>,
  "timeout": 2000,
  "type": "call",
  "userId": Any<String>,
}
`;

exports[`job .getAll() : gets all available jobs 1`] = `
Object {
  "id": Any<String>,
  "key": "snapshot",
  "method": "vm.snapshot",
  "name": "jobTest",
  "paramsVector": Any<Object>,
  "timeout": 2000,
  "type": "call",
  "userId": Any<String>,
}
`;

exports[`job .getAll() : gets all available jobs 2`] = `
Object {
  "id": Any<String>,
  "key": "snapshot",
  "method": "vm.snapshot",
  "name": "jobTest2",
  "paramsVector": Any<Object>,
  "timeout": 2000,
  "type": "call",
  "userId": Any<String>,
}
`;

exports[`job .set() : fails trying to set a job without job.id 1`] = `[JsonRpcError: invalid parameters]`;

exports[`job .set() : sets a job 1`] = `
Object {
  "id": Any<String>,
  "key": "snapshot",
  "method": "vm.clone",
  "name": "jobTest",
  "paramsVector": Any<Object>,
  "timeout": 2000,
  "type": "call",
  "userId": Any<String>,
}
`;
@@ -1,226 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { difference, keyBy } from 'lodash'
|
||||
|
||||
import config from '../_config'
|
||||
import xo, { testWithOtherConnection } from '../_xoConnection'
|
||||
|
||||
const ADMIN_USER = {
|
||||
email: 'admin2@admin.net',
|
||||
password: 'admin',
|
||||
permission: 'admin',
|
||||
}
|
||||
|
||||
describe('job', () => {
|
||||
let defaultJob
|
||||
|
||||
beforeAll(() => {
|
||||
defaultJob = {
|
||||
name: 'jobTest',
|
||||
timeout: 2000,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.snapshot',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
describe('.create() :', () => {
|
||||
it('creates a new job', async () => {
|
||||
jest.setTimeout(6e3)
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const { email, password } = ADMIN_USER
|
||||
await testWithOtherConnection({ email, password }, async xo => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
expect(typeof id).toBe('string')
|
||||
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
expect(job.userId).toBe(userId)
|
||||
await xo.call('job.delete', { id })
|
||||
})
|
||||
})
|
||||
|
||||
it('creates a job with a userId', async () => {
|
||||
const userId = await xo.createTempUser(ADMIN_USER)
|
||||
const id = await xo.createTempJob({ ...defaultJob, userId })
|
||||
const { userId: expectedUserId } = await xo.call('job.get', { id })
|
||||
expect(userId).toBe(expectedUserId)
|
||||
})
|
||||
|
||||
it('fails trying to create a job without job params', async () => {
|
||||
await expect(xo.createTempJob({})).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.getAll() :', () => {
|
||||
it('gets all available jobs', async () => {
|
||||
const jobId1 = await xo.createTempJob(defaultJob)
|
||||
const job2 = {
|
||||
...defaultJob,
|
||||
name: 'jobTest2',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'test2-snapshot',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
const jobId2 = await xo.createTempJob(job2)
|
||||
let jobs = await xo.call('job.getAll')
|
||||
expect(Array.isArray(jobs)).toBe(true)
|
||||
jobs = keyBy(jobs, 'id')
|
||||
|
||||
const newJob1 = jobs[jobId1]
|
||||
expect(newJob1).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob1.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
|
||||
const newJob2 = jobs[jobId2]
|
||||
expect(newJob2).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob2.paramsVector).toEqual(job2.paramsVector)
|
||||
})
|
||||
})
|
||||
|
||||
describe('.get() :', () => {
|
||||
it('gets an existing job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = await xo.call('job.get', { id })
|
||||
expect(job).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(job.paramsVector).toEqual(defaultJob.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to get a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.get', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.set() :', () => {
|
||||
it('sets a job', async () => {
|
||||
const id = await xo.createTempJob(defaultJob)
|
||||
const job = {
|
||||
id,
|
||||
type: 'call',
|
||||
key: 'snapshot',
|
||||
method: 'vm.clone',
|
||||
paramsVector: {
|
||||
type: 'crossProduct',
|
||||
items: [
|
||||
{
|
||||
type: 'set',
|
||||
values: [
|
||||
{
|
||||
id: config.vms.default,
|
||||
name: 'clone',
|
||||
full_copy: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
await xo.call('job.set', {
|
||||
job,
|
||||
})
|
||||
|
||||
const newJob = await xo.call('job.get', { id })
|
||||
expect(newJob).toMatchSnapshot({
|
||||
id: expect.any(String),
|
||||
paramsVector: expect.any(Object),
|
||||
userId: expect.any(String),
|
||||
})
|
||||
expect(newJob.paramsVector).toEqual(job.paramsVector)
|
||||
})
|
||||
|
||||
it('fails trying to set a job without job.id', async () => {
|
||||
await expect(xo.call('job.set', defaultJob)).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.delete() :', () => {
|
||||
it('deletes an existing job', async () => {
|
||||
const id = await xo.call('job.create', { job: defaultJob })
|
||||
const { id: scheduleId } = await xo.call('schedule.create', {
|
||||
jobId: id,
|
||||
cron: '* * * * * *',
|
||||
enabled: false,
|
||||
})
|
||||
await xo.call('job.delete', { id })
|
||||
await expect(xo.call('job.get', { id })).rejects.toMatchSnapshot()
|
||||
await expect(
|
||||
xo.call('schedule.get', { id: scheduleId })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
|
||||
it.skip('fails trying to delete a job with a non existent id', async () => {
|
||||
await expect(
|
||||
xo.call('job.delete', { id: 'non-existent-id' })
|
||||
).rejects.toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
describe('.runSequence() :', () => {
|
||||
let id
|
||||
|
||||
afterEach(async () => {
|
||||
await xo
|
||||
.call('vm.delete', { id, deleteDisks: true })
|
||||
.catch(error => console.error(error))
|
||||
})
|
||||
|
||||
it('runs a job', async () => {
|
||||
jest.setTimeout(7e4)
|
||||
await xo.createTempServer(config.servers.default)
|
||||
const jobId = await xo.createTempJob(defaultJob)
|
||||
const snapshots = xo.objects.all[config.vms.default].snapshots
|
||||
await xo.call('job.runSequence', { idSequence: [jobId] })
|
||||
await xo.waitObjectState(
|
||||
config.vms.default,
|
||||
({ snapshots: actualSnapshots }) => {
|
||||
expect(actualSnapshots.length).toBe(snapshots.length + 1)
|
||||
id = difference(actualSnapshots, snapshots)[0]
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,156 +0,0 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map } from 'lodash'
import eventToPromise from 'event-to-promise'

// ===================================================================

describe('disk', () => {
  let diskId
  let diskIds = []
  let serverId
  let srId
  let xo

  // -----------------------------------------------------------------

  beforeAll(async () => {
    jest.setTimeout(10e3)
    xo = await getMainConnection()

    const config = await getConfig()
    serverId = await xo.call(
      'server.add',
      Object.assign({ autoConnect: false }, config.xenServer1)
    )
    await xo.call('server.connect', { id: serverId })
    await eventToPromise(xo.objects, 'finish')
    srId = await getSrId(xo)
  })

  // -----------------------------------------------------------------

  afterEach(async () => {
    await Promise.all(
      map(diskIds, diskId => xo.call('vdi.delete', { id: diskId }))
    )
    diskIds = []
  })

  // -----------------------------------------------------------------

  afterAll(async () => {
    await xo.call('server.remove', { id: serverId })
  })

  // -----------------------------------------------------------------

  async function createDisk(params) {
    const id = await xo.call('disk.create', params)
    diskIds.push(id)
    return id
  }

  async function createDiskTest() {
    const id = await createDisk({
      name: 'diskTest',
      size: '1GB',
      sr: srId,
    })
    return id
  }

  // ===================================================================

  describe('.create()', () => {
    it('create a new disk on a SR', async () => {
      diskId = await createDisk({
        name: 'diskTest',
        size: '1GB',
        sr: srId,
      })

      await Promise.all([
        waitObjectState(xo, diskId, disk => {
          expect(disk.type).to.be.equal('VDI')
          expect(disk.name_label).to.be.equal('diskTest')
          // TODO: should not test an exact value but around 10%
          expect(disk.size).to.be.equal(1000341504)
          expect(disk.$SR).to.be.equal(srId)
        }),
        waitObjectState(xo, srId, sr => {
          expect(sr.VDIs).include(diskId)
        }),
      ])
    })
  })

  // -------------------------------------------------------------------

  describe('.delete()', () => {
    beforeEach(async () => {
      diskId = await createDiskTest()
    })

    it('deletes a disk', async () => {
      await Promise.all([
        xo.call('vdi.delete', { id: diskId }),
        waitObjectState(xo, diskId, disk => {
          expect(disk).to.be.undefined()
        }),
        waitObjectState(xo, srId, sr => {
          expect(sr.VDIs).not.include(diskId)
        }),
      ])
      diskIds = []
    })
  })

  // ---------------------------------------------------------------------

  describe('.set()', () => {
    beforeEach(async () => {
      diskId = await createDiskTest()
    })

    it('set the name of the disk', async () => {
      await xo.call('vdi.set', {
        id: diskId,
        name_label: 'disk2',
      })

      await waitObjectState(xo, diskId, disk => {
        expect(disk.name_label).to.be.equal('disk2')
      })
    })

    it('set the description of the disk', async () => {
      await xo.call('vdi.set', {
        id: diskId,
        name_description: 'description',
      })

      await waitObjectState(xo, diskId, disk => {
        expect(disk.name_description).to.be.equal('description')
      })
    })

    it.skip('set the size of the disk', async () => {
      await xo.getOrWaitObject(diskId)
      await xo.call('vdi.set', {
        id: diskId,
        size: '5MB',
      })

      await waitObjectState(xo, diskId, disk => {
        expect(disk.size).to.be.equal(6291456)
      })
    })
  })
})
@@ -1,59 +0,0 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
// import expect from 'must'

// ===================================================================

// import {getConnection} from './util'

// ===================================================================

describe('docker', () => {
  // let xo
  // beforeAll(async () => {
  //   xo = await getConnection()
  // })

  // ===================================================================

  describe('.register()', async () => {
    it('registers the VM for Docker management')
  })

  // -------------------------------------------------------------------

  describe('.deregister()', async () => {
    it('deregister the VM for Docker management')
  })

  // -------------------------------------------------------------------

  describe('.start()', async () => {
    it('starts the Docker')
  })

  // -------------------------------------------------------------------

  describe('.stop()', async () => {
    it('stops the Docker')
  })

  // -------------------------------------------------------------------

  describe('.restart()', async () => {
    it('restarts the Docker')
  })

  // -------------------------------------------------------------------

  describe('.pause()', async () => {
    it('pauses the Docker')
  })

  // -------------------------------------------------------------------

  describe('.unpause()', async () => {
    it('unpauses the Docker')
  })
})
@@ -1,377 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { find, map } from 'lodash'
|
||||
|
||||
import { createUser, deleteUsers, getUser, xo } from './util.js'
|
||||
|
||||
// ===================================================================
|
||||
describe('group', () => {
|
||||
const userIds = []
|
||||
const groupIds = []
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
afterEach(async () => {
|
||||
await Promise.all(map(groupIds, id => xo.call('group.delete', { id })))
|
||||
// Deleting users must be done AFTER deleting the group
|
||||
// because there is a race condition in xo-server
|
||||
// which cause some users to not be properly deleted.
|
||||
|
||||
// The test “delete the group with its users” highlight this issue.
|
||||
await deleteUsers(xo, userIds)
|
||||
userIds.length = groupIds.length = 0
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
async function createGroup(params) {
|
||||
const groupId = await xo.call('group.create', params)
|
||||
groupIds.push(groupId)
|
||||
return groupId
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function compareGroup(actual, expected) {
|
||||
expect(actual.name).toEqual(expected.name)
|
||||
expect(actual.id).toEqual(expected.id)
|
||||
expect(actual.users).toEqual(expected.users)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
|
||||
function getAllGroups() {
|
||||
return xo.call('group.getAll')
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------
|
||||
|
||||
async function getGroup(id) {
|
||||
const groups = await getAllGroups()
|
||||
return find(groups, { id: id })
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
describe('.create()', () => {
|
||||
it('creates a group and return its id', async () => {
|
||||
const groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
|
||||
it.skip('does not create two groups with the same name', async () => {
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
|
||||
await createGroup({
|
||||
name: 'Avengers',
|
||||
}).then(
|
||||
() => {
|
||||
throw new Error('createGroup() should have thrown')
|
||||
},
|
||||
function(error) {
|
||||
expect(error.message).to.match(/duplicate group/i)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.delete()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
groupId = await xo.call('group.create', {
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
it('delete a group', async () => {
|
||||
await xo.call('group.delete', {
|
||||
id: groupId,
|
||||
})
|
||||
const group = await getGroup(groupId)
|
||||
expect(group).toBeUndefined()
|
||||
})
|
||||
|
||||
it.skip("erase the group from user's groups list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('group.delete', { id: groupId })
|
||||
const user = await getUser(userId)
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
|
||||
it.skip("erase the user from group's users list", async () => {
|
||||
// create user and add it to the group
|
||||
const userId = await createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
})
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
// delete the group
|
||||
await xo.call('user.delete', { id: userId })
|
||||
const group = await getGroup(groupId)
|
||||
expect(group.users).toEqual([])
|
||||
})
|
||||
|
||||
// FIXME: some users are not properly deleted because of a race condition with group deletion.
|
||||
it.skip('delete the group with its users', async () => {
|
||||
// create users
|
||||
;[userId1, userId2, userId3] = await Promise.all([
|
||||
xo.call('user.create', {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
xo.call('user.create', {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2, userId3],
|
||||
})
|
||||
|
||||
// delete the group with his users
|
||||
await Promise.all([
|
||||
xo.call('group.delete', {
|
||||
id: groupId,
|
||||
}),
|
||||
deleteUsers(xo, [userId1, userId2, userId3]),
|
||||
])
|
||||
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
expect(group).toBeUndefined()
|
||||
expect(user1).toBeUndefined()
|
||||
expect(user2).toBeUndefined()
|
||||
expect(user3).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.getAll()', () => {
|
||||
it('returns an array', async () => {
|
||||
const groups = await xo.call('group.getAll')
|
||||
expect(groups).toBeInstanceOf(Array)
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.setUsers ()', () => {
|
||||
let groupId
|
||||
let userId1
|
||||
let userId2
|
||||
let userId3
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId1, userId2, userId3] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'natasha.romanov@shield.com',
|
||||
password: 'BlackWidow',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'pietro.maximoff@shield.com',
|
||||
password: 'QickSilver',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('can set users of a group', async () => {
|
||||
// add two users on the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId2],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId2],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([groupId])
|
||||
expect(user3.groups).toEqual([])
|
||||
}
|
||||
|
||||
// change users of the group
|
||||
await xo.call('group.setUsers', {
|
||||
id: groupId,
|
||||
userIds: [userId1, userId3],
|
||||
})
|
||||
{
|
||||
const [group, user1, user2, user3] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId1),
|
||||
getUser(xo, userId2),
|
||||
getUser(xo, userId3),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId1, userId3],
|
||||
})
|
||||
|
||||
expect(user1.groups).toEqual([groupId])
|
||||
expect(user2.groups).toEqual([])
|
||||
expect(user3.groups).toEqual([groupId])
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('.addUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
it('adds a user id to a group', async () => {
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [userId],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([groupId])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('removeUser()', () => {
|
||||
let groupId
|
||||
let userId
|
||||
beforeEach(async () => {
|
||||
;[groupId, userId] = await Promise.all([
|
||||
createGroup({
|
||||
name: 'Avengers',
|
||||
}),
|
||||
createUser(xo, userIds, {
|
||||
email: 'tony.stark@stark_industry.com',
|
||||
password: 'IronMan',
|
||||
}),
|
||||
])
|
||||
|
||||
await xo.call('group.addUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
})
|
||||
|
||||
it('removes a user to a group', async () => {
|
||||
await xo.call('group.removeUser', {
|
||||
id: groupId,
|
||||
userId: userId,
|
||||
})
|
||||
|
||||
const [group, user] = await Promise.all([
|
||||
getGroup(groupId),
|
||||
getUser(xo, userId),
|
||||
])
|
||||
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Avengers',
|
||||
users: [],
|
||||
})
|
||||
|
||||
expect(user.groups).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
describe('set()', () => {
|
||||
let groupId
|
||||
beforeEach(async () => {
|
||||
groupId = await createGroup({
|
||||
name: 'Avengers',
|
||||
})
|
||||
})
|
||||
|
||||
it('changes name of a group', async () => {
|
||||
await xo.call('group.set', {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
})
|
||||
|
||||
const group = await getGroup(groupId)
|
||||
compareGroup(group, {
|
||||
id: groupId,
|
||||
name: 'Guardians of the Galaxy',
|
||||
users: [],
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,239 +0,0 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
|
||||
|
||||
import expect from 'must'
|
||||
import eventToPromise from 'event-to-promise'
|
||||
import {
|
||||
getAllHosts,
|
||||
getConfig,
|
||||
getMainConnection,
|
||||
getVmToMigrateId,
|
||||
waitObjectState,
|
||||
} from './util'
|
||||
import { find, forEach } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('host', () => {
|
||||
let xo
|
||||
let serverId
|
||||
let hostId
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
beforeAll(async () => {
|
||||
jest.setTimeout(10e3)
|
||||
let config
|
||||
;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
|
||||
serverId = await xo.call('server.add', config.xenServer2).catch(() => {})
|
||||
await eventToPromise(xo.objects, 'finish')
|
||||
|
||||
hostId = getHost(config.host1)
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
afterAll(async () => {
|
||||
await xo.call('server.remove', {
|
||||
id: serverId,
|
||||
})
|
||||
})
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
function getHost(nameLabel) {
|
||||
const hosts = getAllHosts(xo)
|
||||
const host = find(hosts, { name_label: nameLabel })
|
||||
return host.id
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
describe('.set()', () => {
|
||||
let nameLabel
|
||||
let nameDescription
|
||||
|
||||
beforeEach(async () => {
|
||||
// get values to set them at the end of the test
|
||||
const host = xo.objects.all[hostId]
|
||||
nameLabel = host.name_label
|
||||
nameDescription = host.name_description
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('host.set', {
|
||||
id: hostId,
|
||||
name_label: nameLabel,
|
||||
name_description: nameDescription,
|
||||
})
|
||||
})
|
||||
|
||||
it('changes properties of the host', async () => {
|
||||
await xo.call('host.set', {
|
||||
id: hostId,
|
||||
name_label: 'labTest',
|
||||
name_description: 'description',
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.name_label).to.be.equal('labTest')
|
||||
expect(host.name_description).to.be.equal('description')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.restart()', () => {
|
||||
jest.setTimeout(330e3)
|
||||
it('restart the host', async () => {
|
||||
await xo.call('host.restart', { id: hostId })
|
||||
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.current_operations)
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
})
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Running')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.restartAgent()', () => {
|
||||
it('restart a Xen agent on the host')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.start()', () => {
|
||||
jest.setTimeout(300e3)
|
||||
beforeEach(async () => {
|
||||
try {
|
||||
await xo.call('host.stop', { id: hostId })
|
||||
} catch (_) {}
|
||||
|
||||
// test if the host is shutdown
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
})
|
||||
})
|
||||
|
||||
it('start the host', async () => {
|
||||
await xo.call('host.start', { id: hostId })
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Running')
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.stop()', () => {
|
||||
jest.setTimeout(300e3)
|
||||
let vmId
|
||||
|
||||
beforeAll(async () => {
|
||||
vmId = await getVmToMigrateId(xo)
|
||||
try {
|
||||
await xo.call('vm.start', { id: vmId })
|
||||
} catch (_) {}
|
||||
try {
|
||||
await xo.call('vm.migrate', {
|
||||
vm: vmId,
|
||||
host: hostId,
|
||||
})
|
||||
} catch (_) {}
|
||||
})
|
||||
afterEach(async () => {
|
||||
await xo.call('host.start', { id: hostId })
|
||||
})
|
||||
|
||||
it('stop the host and shutdown its VMs', async () => {
|
||||
await xo.call('host.stop', { id: hostId })
|
||||
await Promise.all([
|
||||
waitObjectState(xo, vmId, vm => {
|
||||
expect(vm.$container).not.to.be.equal(hostId)
|
||||
expect(vm.power_state).to.be.equal('Halted')
|
||||
}),
|
||||
waitObjectState(xo, hostId, host => {
|
||||
expect(host.power_state).to.be.equal('Halted')
|
||||
}),
|
||||
])
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.detach()', () => {
|
||||
it('ejects the host of a pool')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.disable(), ', () => {
|
||||
afterEach(async () => {
|
||||
await xo.call('host.enable', {
|
||||
id: hostId,
|
||||
})
|
||||
})
|
||||
|
||||
it('disables to create VM on the host', async () => {
|
||||
await xo.call('host.disable', { id: hostId })
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.enabled).to.be.false()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.enable()', async () => {
|
||||
beforeEach(async () => {
|
||||
await xo.call('host.disable', { id: hostId })
|
||||
})
|
||||
|
||||
it('enables to create VM on the host', async () => {
|
||||
await xo.call('host.enable', { id: hostId })
|
||||
|
||||
await waitObjectState(xo, hostId, host => {
|
||||
expect(host.enabled).to.be.true()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
describe('.createNetwork()', () => {
|
||||
it('create a network')
|
||||
})
|
||||
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
describe('.listMissingPatches()', () => {
|
||||
it('returns an array of missing patches in the host')
|
||||
it('returns a empty array if up-to-date')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.installPatch()', () => {
|
||||
it('installs a patch patch on the host')
|
||||
})
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
describe('.stats()', () => {
|
||||
it('returns an array with statistics of the host', async () => {
|
||||
const stats = await xo.call('host.stats', {
|
||||
host: hostId,
|
||||
})
|
||||
expect(stats).to.be.an.object()
|
||||
|
||||
forEach(stats, function(array, key) {
|
||||
expect(array).to.be.an.array()
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,79 +0,0 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import { getConfig, getMainConnection, waitObjectState } from './util'
import eventToPromise from 'event-to-promise'
import { find } from 'lodash'

// ===================================================================

describe('pool', () => {
  let xo
  let serverId
  let poolId
  let config

  beforeAll(async () => {
    jest.setTimeout(10e3)
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])
    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')
    poolId = getPoolId()
  })

  // -------------------------------------------------------------------

  afterAll(async () => {
    await xo.call('server.remove', {
      id: serverId,
    })
  })

  // -----------------------------------------------------------------

  function getPoolId() {
    const pools = xo.objects.indexes.type.pool
    const pool = find(pools, { name_label: config.pool.name_label })
    return pool.id
  }

  // ===================================================================

  describe('.set()', () => {
    afterEach(async () => {
      await xo.call('pool.set', {
        id: poolId,
        name_label: config.pool.name_label,
        name_description: '',
      })
    })
    it.skip('set pool parameters', async () => {
      await xo.call('pool.set', {
        id: poolId,
        name_label: 'nameTest',
        name_description: 'description',
      })

      await waitObjectState(xo, poolId, pool => {
        expect(pool.name_label).to.be.equal('nameTest')
        expect(pool.name_description).to.be.equal('description')
      })
    })
  })

  // ------------------------------------------------------------------

  describe('.installPatch()', () => {
    it('install a patch on the pool')
  })

  // -----------------------------------------------------------------

  describe('handlePatchUpload()', () => {
    it('')
  })
})
@@ -1,33 +0,0 @@
/* eslint-env jest */

import { xo } from './util'

// ===================================================================

describe('role', () => {
  describe('.getAll()', () => {
    it(' returns all the roles', async () => {
      const role = await xo.call('role.getAll')

      // FIXME: use permutationOf but figure out how not to compare objects by
      // equality.
      expect(role).toEqual([
        {
          id: 'viewer',
          name: 'Viewer',
          permissions: ['view'],
        },
        {
          id: 'operator',
          name: 'Operator',
          permissions: ['view', 'operate'],
        },
        {
          id: 'admin',
          name: 'Admin',
          permissions: ['view', 'operate', 'administrate'],
        },
      ])
    })
  })
})
@@ -1,149 +0,0 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  getConfig,
  getMainConnection,
  getSchedule,
  jobTest,
  scheduleTest,
} from './util'
import eventToPromise from 'event-to-promise'
import { map } from 'lodash'

// ===================================================================

describe('schedule', () => {
  let xo
  let serverId
  let scheduleIds = []
  let jobId

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')

    jobId = await jobTest(xo)
  })

  // -----------------------------------------------------------------

  afterAll(async () => {
    await Promise.all([
      xo.call('job.delete', { id: jobId }),
      xo.call('server.remove', { id: serverId }),
    ])
  })

  // -----------------------------------------------------------------

  afterEach(async () => {
    await Promise.all(
      map(scheduleIds, scheduleId =>
        xo.call('schedule.delete', { id: scheduleId })
      )
    )
    scheduleIds = []
  })

  // -----------------------------------------------------------------

  async function createSchedule(params) {
    const schedule = await xo.call('schedule.create', params)
    scheduleIds.push(schedule.id)
    return schedule
  }

  async function createScheduleTest() {
    const schedule = await scheduleTest(xo, jobId)
    scheduleIds.push(schedule.id)
    return schedule
  }

  // =================================================================

  describe('.getAll()', () => {
    it('gets all existing schedules', async () => {
      const schedules = await xo.call('schedule.getAll')
      expect(schedules).to.be.an.array()
    })
  })

  // -----------------------------------------------------------------

  describe('.get()', () => {
    let scheduleId
    beforeAll(async () => {
      scheduleId = (await createScheduleTest()).id
    })

    it('gets an existing schedule', async () => {
      const schedule = await xo.call('schedule.get', { id: scheduleId })
      expect(schedule.job).to.be.equal(jobId)
      expect(schedule.cron).to.be.equal('* * * * * *')
      expect(schedule.enabled).to.be.false()
    })
  })

  // -----------------------------------------------------------------

  describe('.create()', () => {
    it('creates a new schedule', async () => {
      const schedule = await createSchedule({
        jobId: jobId,
        cron: '* * * * * *',
        enabled: true,
      })
      expect(schedule.job).to.be.equal(jobId)
      expect(schedule.cron).to.be.equal('* * * * * *')
      expect(schedule.enabled).to.be.true()
    })
  })

  // -----------------------------------------------------------------

  describe('.set()', () => {
    let scheduleId
    beforeAll(async () => {
      scheduleId = (await createScheduleTest()).id
    })
    it('modifies an existing schedule', async () => {
      await xo.call('schedule.set', {
        id: scheduleId,
        cron: '2 * * * * *',
      })

      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.cron).to.be.equal('2 * * * * *')
    })
  })

  // -----------------------------------------------------------------

  describe('.delete()', () => {
    let scheduleId
    beforeEach(async () => {
      scheduleId = (await createScheduleTest()).id
    })
    it('deletes an existing schedule', async () => {
      await xo.call('schedule.delete', { id: scheduleId })
      await getSchedule(xo, scheduleId).then(
        () => {
          throw new Error('getSchedule() should have thrown')
        },
        function(error) {
          expect(error.message).to.match(/no such object/)
        }
      )
      scheduleIds = []
    })
  })
})
@@ -1,82 +0,0 @@
/* eslint-env jest */

// Doc: https://github.com/moll/js-must/blob/master/doc/API.md#must
import expect from 'must'

// ===================================================================

import {
  jobTest,
  scheduleTest,
  getConfig,
  getMainConnection,
  getSchedule,
} from './util'
import eventToPromise from 'event-to-promise'

// ===================================================================

describe('scheduler', () => {
  let xo
  let serverId
  let jobId
  let scheduleId

  beforeAll(async () => {
    jest.setTimeout(10e3)
    let config
    ;[xo, config] = await Promise.all([getMainConnection(), getConfig()])

    serverId = await xo.call('server.add', config.xenServer1).catch(() => {})
    await eventToPromise(xo.objects, 'finish')

    jobId = await jobTest(xo)
    scheduleId = (await scheduleTest(xo, jobId)).id
  })

  // -----------------------------------------------------------------

  afterAll(async () => {
    await Promise.all([
      xo.call('schedule.delete', { id: scheduleId }),
      xo.call('job.delete', { id: jobId }),
      xo.call('server.remove', { id: serverId }),
    ])
  })

  // =================================================================

  describe('.enable()', () => {
    afterEach(async () => {
      await xo.call('scheduler.disable', { id: scheduleId })
    })
    it.skip("enables a schedule to run it's job as scheduled", async () => {
      await xo.call('scheduler.enable', { id: scheduleId })
      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.enabled).to.be.true()
    })
  })

  // -----------------------------------------------------------------

  describe('.disable()', () => {
    beforeEach(async () => {
      await xo.call('schedule.enable', { id: scheduleId })
    })
    it.skip('disables a schedule', async () => {
      await xo.call('schedule.disable', { id: scheduleId })
      const schedule = await getSchedule(xo, scheduleId)
      expect(schedule.enabled).to.be.false()
    })
  })

  // -----------------------------------------------------------------

  describe('.getScheduleTable()', () => {
    it('get a map of existing schedules', async () => {
      const table = await xo.call('scheduler.getScheduleTable')
      expect(table).to.be.an.object()
      expect(table).to.match(scheduleId)
    })
  })
})
@@ -1,208 +0,0 @@
/* eslint-env jest */

import { find, map } from 'lodash'

import { config, rejectionOf, xo } from './util'

// ===================================================================

describe('server', () => {
  let serverIds = []

  afterEach(async () => {
    await Promise.all(
      map(serverIds, serverId => xo.call('server.remove', { id: serverId }))
    )
    serverIds = []
  })

  async function addServer(params) {
    const serverId = await xo.call('server.add', params)
    serverIds.push(serverId)
    return serverId
  }

  function getAllServers() {
    return xo.call('server.getAll')
  }

  async function getServer(id) {
    const servers = await getAllServers()
    return find(servers, { id: id })
  }

  // ==================================================================

  describe('.add()', () => {
    it('add a Xen server and return its id', async () => {
      const serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })

      const server = await getServer(serverId)
      expect(typeof server.id).toBe('string')
      expect(server).toEqual({
        id: serverId,
        host: 'xen1.example.org',
        username: 'root',
        status: 'disconnected',
      })
    })

    it('does not add two servers with the same host', async () => {
      await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
      expect(
        (await rejectionOf(
          addServer({
            host: 'xen1.example.org',
            username: 'root',
            password: 'password',
            autoConnect: false,
          })
        )).message
      ).toBe('unknown error from the peer')
    })

    it('set autoConnect true by default', async () => {
      const serverId = await addServer(config.xenServer1)
      const server = await getServer(serverId)

      expect(server.id).toBe(serverId)
      expect(server.host).toBe('192.168.100.3')
      expect(server.username).toBe('root')
      expect(server.status).toMatch(/^connect(?:ed|ing)$/)
    })
  })

  // -----------------------------------------------------------------

  describe('.remove()', () => {
    let serverId
    beforeEach(async () => {
      serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
    })

    it('remove a Xen server', async () => {
      await xo.call('server.remove', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toBeUndefined()
    })
  })

  // -----------------------------------------------------------------

  describe('.getAll()', () => {
    it('returns an array', async () => {
      const servers = await xo.call('server.getAll')

      expect(servers).toBeInstanceOf(Array)
    })
  })

  // -----------------------------------------------------------------

  describe('.set()', () => {
    let serverId
    beforeEach(async () => {
      serverId = await addServer({
        host: 'xen1.example.org',
        username: 'root',
        password: 'password',
        autoConnect: false,
      })
    })

    it('changes attributes of an existing server', async () => {
      await xo.call('server.set', {
        id: serverId,
        username: 'root2',
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        id: serverId,
        host: 'xen1.example.org',
        username: 'root2',
        status: 'disconnected',
      })
    })
  })

  // -----------------------------------------------------------------

  describe('.connect()', () => {
    jest.setTimeout(5e3)

    it('connects to a Xen server', async () => {
      const serverId = await addServer(
        Object.assign({ autoConnect: false }, config.xenServer1)
      )

      await xo.call('server.connect', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        enabled: 'true',
        id: serverId,
        host: '192.168.100.3',
        username: 'root',
        status: 'connected',
      })
    })

    it.skip('connect to a Xen server on a slave host', async () => {
      const serverId = await addServer(config.slaveServer)
      await xo.call('server.connect', { id: serverId })

      const server = await getServer(serverId)
      expect(server.status).toBe('connected')
    })
  })

  // -----------------------------------------------------------------

  describe('.disconnect()', () => {
    jest.setTimeout(5e3)
    let serverId
    beforeEach(async () => {
      serverId = await addServer(
        Object.assign({ autoConnect: false }, config.xenServer1)
      )
      await xo.call('server.connect', {
        id: serverId,
      })
    })

    it('disconnects to a Xen server', async () => {
      await xo.call('server.disconnect', {
        id: serverId,
      })

      const server = await getServer(serverId)
      expect(server).toEqual({
        id: serverId,
        host: '192.168.100.3',
        username: 'root',
        status: 'disconnected',
      })
    })
  })
})
@@ -1,53 +0,0 @@
/* eslint-env jest */

import defer from 'golike-defer'
import { map } from 'lodash'

import { getConnection, rejectionOf, testConnection, xo } from './util.js'

// ===================================================================

describe('token', () => {
  const tokens = []

  afterAll(async () => {
    await Promise.all(map(tokens, token => xo.call('token.delete', { token })))
  })

  async function createToken() {
    const token = await xo.call('token.create')
    tokens.push(token)
    return token
  }

  // =================================================================

  describe('.create()', () => {
    it('creates a token string which can be used to sign in', async () => {
      const token = await createToken()

      await testConnection({ credentials: { token } })
    })
  })

  // -------------------------------------------------------------------

  describe('.delete()', () => {
    it(
      'deletes a token',
      defer(async $defer => {
        const token = await createToken()
        const xo2 = await getConnection({ credentials: { token } })
        $defer(() => xo2.close())

        await xo2.call('token.delete', {
          token,
        })

        expect(
          (await rejectionOf(testConnection({ credentials: { token } }))).code
        ).toBe(3)
      })
    )
  })
})
Some files were not shown because too many files have changed in this diff