Compare commits

..

11 Commits

Author | SHA1 | Message | Date
heafalan | 50704b8404 | feat(xo-server-test/backupNg.spec.js): test on 'backupNg.importVmBackup' (See #4307) | 2019-08-12 15:04:03 +02:00
heafalan | 341cdf6cba | delete snapshot at the end of the test | 2019-08-12 14:04:01 +02:00
heafalan | ced02de429 | fixes tests for QA | 2019-08-09 12:58:01 +02:00
heafalan | 1745d61c1d | requested changes | 2019-08-08 14:08:23 +02:00
heafalan | fac2e6845c | requested changes | 2019-08-08 10:21:25 +02:00
heafalan | fd7eddb68c | various changes | 2019-08-07 13:09:08 +02:00
heafalan | 23fb486a40 | various changes | 2019-08-05 16:27:15 +02:00
heafalan | 9114aa5b11 | add server | 2019-08-02 12:39:50 +02:00
heafalan | fdb3368b44 | Merge branch 'master' of http://github.com/vatesfr/xen-orchestra into deltaBackup-with2remotes | 2019-08-02 12:09:42 +02:00
heafalan | c0949e9aef | Merge branch 'master' of http://github.com/vatesfr/xen-orchestra into deltaBackup-with2remotes | 2019-08-02 12:09:22 +02:00
heafalan | ae9b4126b1 | feat(xo-server-test): delta backup test (See #4307) | 2019-07-31 15:52:37 +02:00
235 changed files with 7126 additions and 9871 deletions

View File

@@ -21,7 +21,7 @@ module.exports = {
overrides: [
{
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
rules: {
'no-console': 'off',
},
@@ -38,15 +38,6 @@ module.exports = {
// disabled because XAPI objects are using camel case
camelcase: ['off'],
'react/jsx-handler-names': 'off',
// disabled because not always relevant, we might reconsider in the future
//
// enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
//
// example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
'lines-between-class-members': 'off',
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

View File

@@ -36,8 +36,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,378 +0,0 @@
#!/usr/bin/env node
const args = process.argv.slice(2)
if (
args.length === 0 ||
/^(?:-h|--help)$/.test(args[0]) ||
args[0] !== 'clean-vms'
) {
console.log('Usage: xo-backups clean-vms [--force] xo-vm-backups/*')
// eslint-disable-next-line no-process-exit
return process.exit(1)
}
// remove `clean-vms` arg which is the only available command ATM
args.splice(0, 1)
// only act (ie delete files) if `--force` is present
const force = args[0] === '--force'
if (force) {
args.splice(0, 1)
}
// -----------------------------------------------------------------------------
const assert = require('assert')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')
const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
// -----------------------------------------------------------------------------
const asyncMap = curryRight((iterable, fn) =>
Promise.all(
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
)
)
const filter = (...args) => thisArg => thisArg.filter(...args)
// TODO: better check?
// our heuristic is not good enough, there has been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// FIXME: the heuristic does not work if the XVA is compressed, we need to
// implement a specific test for it
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async path => {
try {
const fd = await fs.open(path, 'r')
try {
const { size } = await fs.fstat(fd)
if (size <= 1024 || size % 512 !== 0) {
return false
}
const buf = Buffer.allocUnsafe(1024)
assert.strictEqual(
await fs.read(fd, buf, 0, buf.length, size - buf.length),
buf.length
)
return buf.every(_ => _ === 0)
} finally {
fs.close(fd).catch(noop)
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidTar', path, error)
return true
}
}
const noop = Function.prototype
const readDir = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
entries[i] = `${path}/${entry}`
})
return entries
},
error => {
// a missing dir is by definition empty
if (error != null && error.code === 'ENOENT') {
return []
}
throw error
}
)
// -----------------------------------------------------------------------------
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
async function mergeVhdChain(chain) {
assert(chain.length >= 2)
const child = chain[0]
const parent = chain[chain.length - 1]
const children = chain.slice(0, -1).reverse()
console.warn('Unused parents of VHD', child)
chain
.slice(1)
.reverse()
.forEach(parent => {
console.warn(' ', parent)
})
force && console.warn(' merging…')
console.warn('')
if (force) {
// `mergeVhd` does not work with a stream, either
// - make it accept a stream
// - or create synthetic VHD which is not a stream
return console.warn('TODO: implement merge')
// await mergeVhd(
// handler,
// parent,
// handler,
// children.length === 1
// ? child
// : await createSyntheticStream(handler, children)
// )
}
await Promise.all([
force && fs.rename(parent, child),
asyncMap(children.slice(0, -1), child => {
console.warn('Unused VHD', child)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(child)
}),
])
}
const listVhds = pipe([
vmDir => vmDir + '/vdis',
readDir,
asyncMap(readDir),
flatten,
asyncMap(readDir),
flatten,
filter(_ => _.endsWith('.vhd')),
])
async function handleVm(vmDir) {
const vhds = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
// remove broken VHDs
await asyncMap(await listVhds(vmDir), async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error(
'this script does not support multiple VHD children'
)
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
} catch (error) {
console.warn('Error while checking VHD', path)
console.warn(' ', error)
if (error != null && error.code === 'ERR_ASSERTION') {
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(path))
}
}
})
// remove VHDs with missing ancestors
{
const deletions = []
// return true if the VHD has been deleted or is missing
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhd]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhd)
console.warn('Error while checking VHD', vhd)
console.warn(' missing parent', parent)
force && console.warn(' deleting…')
console.warn('')
force && deletions.push(handler.unlink(vhd))
}
}
// > A property that is deleted before it has been visited will not be
// > visited later.
// >
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
for (const child in vhdParents) {
deleteIfOrphan(child)
}
await Promise.all(deletions)
}
const [jsons, xvas] = await readDir(vmDir).then(entries => [
entries.filter(_ => _.endsWith('.json')),
new Set(entries.filter(_ => _.endsWith('.xva'))),
])
await asyncMap(xvas, async path => {
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await isValidTar(path))) {
console.warn('Potential broken XVA', path)
console.warn('')
}
})
const unusedVhds = new Set(vhds)
const unusedXvas = new Set(xvas)
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await fs.readFile(json))
const { mode } = metadata
if (mode === 'full') {
const linkedXva = resolve(vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
console.warn('Error while checking backup', json)
console.warn(' missing file', linkedXva)
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
} else if (mode === 'delta') {
const linkedVhds = (() => {
const { vhds } = metadata
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
})()
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
} else {
console.warn('Error while checking backup', json)
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
console.warn(
' %i/%i missing VHDs',
missingVhds.length,
linkedVhds.length
)
missingVhds.forEach(vhd => {
console.warn(' ', vhd)
})
force && console.warn(' deleting…')
console.warn('')
force && (await handler.unlink(json))
}
}
})
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
const vhdChainsToMerge = { __proto__: null }
const toCheck = new Set(unusedVhds)
const getUsedChildChainOrDelete = vhd => {
if (vhd in vhdChainsToMerge) {
const chain = vhdChainsToMerge[vhd]
delete vhdChainsToMerge[vhd]
return chain
}
if (!unusedVhds.has(vhd)) {
return [vhd]
}
// no longer needs to be checked
toCheck.delete(vhd)
const child = vhdChildren[vhd]
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
return chain
}
}
console.warn('Unused VHD', vhd)
force && console.warn(' deleting…')
console.warn('')
force && unusedVhdsDeletion.push(handler.unlink(vhd))
}
toCheck.forEach(vhd => {
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
})
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain))
}
})
}
await Promise.all([
unusedVhdsDeletion,
asyncMap(unusedXvas, path => {
console.warn('Unused XVA', path)
force && console.warn(' deleting…')
console.warn('')
return force && handler.unlink(path)
}),
])
}
// -----------------------------------------------------------------------------
asyncMap(args, async vmDir => {
vmDir = resolve(vmDir)
// TODO: implement this in `xo-server`, not easy because not compatible with
// `@xen-orchestra/fs`.
const release = await lockfile.lock(vmDir)
try {
await handleVm(vmDir)
} catch (error) {
console.error('handleVm', vmDir, error)
} finally {
await release()
}
}).catch(error => console.error('main', error))
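
For readers unfamiliar with the helper style at the top of this deleted script, here is a minimal standalone sketch of the curried `asyncMap` + `pipe` pattern it relies on; the pipeline and values below are made up, and only `lodash` and `promise-toolbox` (already required by the script) are assumed:

```js
// Sketch of the curried asyncMap/pipe style used by the script above.
// `asyncMap(fn)` returns a function waiting for the iterable, which is what
// lets it be dropped directly into a pipe() of steps.
const { curryRight } = require('lodash')
const { pipe } = require('promise-toolbox')

const asyncMap = curryRight((iterable, fn) =>
  Promise.all(Array.from(iterable, fn))
)

// hypothetical pipeline: double every value, then sum the results
const doubleThenSum = pipe([
  asyncMap(async n => n * 2),
  values => values.reduce((sum, n) => sum + n, 0),
])

doubleThenSum([1, 2, 3]).then(total => console.log(total)) // → 12
```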

View File

@@ -1,27 +0,0 @@
{
"bin": {
"xo-backups": "index.js"
},
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/fs": "^0.10.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.14.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.7.0"
},
"engines": {
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
"repository": {
"directory": "@xen-orchestra/backups-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0"
}

View File

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.27.2"
"xen-api": "^0.27.1"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.5",
"version": "1.0.3",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -46,8 +46,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -7,21 +7,7 @@ const MAX_DELAY = 2 ** 31 - 1
class Job {
constructor(schedule, fn) {
let scheduledDate
const wrapper = () => {
const now = Date.now()
if (scheduledDate > now) {
// we're early, delay
//
// no need to check _isEnabled, we're just delaying the existing timeout
//
// see https://github.com/vatesfr/xen-orchestra/issues/4625
this._timeout = setTimeout(wrapper, scheduledDate - now)
return
}
this._isRunning = true
let result
try {
result = fn()
@@ -36,36 +22,23 @@ class Job {
}
}
const scheduleNext = () => {
this._isRunning = false
if (this._isEnabled) {
const now = Date.now()
scheduledDate = +schedule._createDate()
const delay = scheduledDate - now
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}
const delay = schedule._nextDelay()
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)
: setTimeout(scheduleNext, MAX_DELAY)
}
this._isEnabled = false
this._isRunning = false
this._scheduleNext = scheduleNext
this._timeout = undefined
}
start() {
this.stop()
this._isEnabled = true
if (!this._isRunning) {
this._scheduleNext()
}
this._scheduleNext()
}
stop() {
this._isEnabled = false
clearTimeout(this._timeout)
}
}
@@ -95,6 +68,11 @@ class Schedule {
return dates
}
_nextDelay() {
const now = this._createDate()
return next(this._schedule, now) - now
}
startJob(fn) {
const job = this.createJob(fn)
job.start()
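
For context on the `MAX_DELAY` comparison that appears in both versions of this hunk: `setTimeout` overflows for delays above `2 ** 31 - 1` ms and fires almost immediately, so long waits have to be re-armed in chunks. A minimal standalone sketch of that pattern (the helper name and the 30-day delay are made up):

```js
// setTimeout cannot be trusted with delays above 2^31 - 1 ms, so split a long
// wait into hops no longer than MAX_DELAY, re-arming until the target is due.
const MAX_DELAY = 2 ** 31 - 1

function setLongTimeout(fn, delay) {
  return delay < MAX_DELAY
    ? setTimeout(fn, delay)
    : setTimeout(() => setLongTimeout(fn, delay - MAX_DELAY), MAX_DELAY)
}

// hypothetical: fire in ~30 days, which is above the 2^31 - 1 ms limit
setLongTimeout(() => console.log('fired'), 30 * 24 * 60 * 60 * 1000)
```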

View File

@@ -1,62 +0,0 @@
/* eslint-env jest */
import { createSchedule } from './'
describe('issues', () => {
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})
job.start()
jest.runAllTimers()
expect(nCalls).toBe(1)
job.stop()
resolve()
await promise
jest.runAllTimers()
expect(nCalls).toBe(1)
})
test('stop then start during async job execution', async () => {
let nCalls = 0
let resolve, promise
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
promise = new Promise(r => {
resolve = r
})
return promise
})
job.start()
jest.runAllTimers()
expect(nCalls).toBe(1)
job.stop()
job.start()
resolve()
await promise
jest.runAllTimers()
expect(nCalls).toBe(2)
})
})

View File

@@ -34,8 +34,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -33,8 +33,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -30,7 +30,7 @@
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
@@ -46,10 +46,10 @@
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/log",
"version": "0.2.0",
"version": "0.1.4",
"license": "ISC",
"description": "",
"keywords": [],
@@ -31,16 +31,16 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -19,8 +19,7 @@ const createTransport = config => {
}
}
let { filter } = config
let transport = createTransport(config.transport)
let { filter, transport } = config
const level = resolve(config.level)
if (filter !== undefined) {
@@ -52,12 +51,11 @@ const symbol =
? Symbol.for('@xen-orchestra/log')
: '@@@xen-orchestra/log'
const { env } = process
global[symbol] = createTransport({
// display warnings or above, and all that are enabled via DEBUG or
// NODE_DEBUG env
filter: [env.DEBUG, env.NODE_DEBUG].filter(Boolean).join(','),
level: resolve(env.LOG_LEVEL, LEVELS.INFO),
filter: process.env.DEBUG || process.env.NODE_DEBUG,
level: LEVELS.INFO,
transport: createConsoleTransport(),
})

View File

@@ -1,5 +1,5 @@
import createTransport from './transports/console'
import LEVELS, { resolve } from './levels'
import LEVELS from './levels'
const symbol =
typeof Symbol !== 'undefined'
@@ -9,8 +9,7 @@ if (!(symbol in global)) {
// the default behavior, without requiring `configure` is to avoid
// logging anything unless it's a real error
const transport = createTransport()
const level = resolve(process.env.LOG_LEVEL, LEVELS.WARN)
global[symbol] = log => log.level >= level && transport(log)
global[symbol] = log => log.level > LEVELS.WARN && transport(log)
}
// -------------------------------------------------------------------
@@ -73,5 +72,5 @@ prototype.wrap = function(message, fn) {
}
}
export const createLogger = namespace => new Logger(namespace)
const createLogger = namespace => new Logger(namespace)
export { createLogger as default }
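
This hunk shows the two export forms of `createLogger` (named vs default). A short usage sketch, matching only the calls visible in the vhd-lib hunks later in this diff; the namespace and messages are made up:

```js
// named-export form, as imported in the vhd-lib hunks later in this diff
import { createLogger } from '@xen-orchestra/log'

// the default-export form would instead be:
// import createLogger from '@xen-orchestra/log'

const { debug, warn } = createLogger('my-app:example') // hypothetical namespace

warn('error while doing something', { detail: 42 })
debug('only emitted when the configured level/filter allows it')
```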

View File

@@ -13,22 +13,11 @@ for (const name in LEVELS) {
NAMES[LEVELS[name]] = name
}
// resolves to the number representation of a level
//
// returns `defaultLevel` if invalid
export const resolve = (level, defaultLevel) => {
const type = typeof level
if (type === 'number') {
if (level in NAMES) {
return level
}
} else if (type === 'string') {
const nLevel = LEVELS[level.toUpperCase()]
if (nLevel !== undefined) {
return nLevel
}
export const resolve = level => {
if (typeof level === 'string') {
level = LEVELS[level.toUpperCase()]
}
return defaultLevel
return level
}
Object.freeze(LEVELS)
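
A self-contained sketch of the two-argument `resolve(level, defaultLevel)` shown in this hunk; the numeric values of `LEVELS` here are illustrative only:

```js
// illustrative level numbers — the real LEVELS map lives in src/levels.js
const LEVELS = { DEBUG: 2, INFO: 3, WARN: 4, ERROR: 5 }
const NAMES = {}
for (const name in LEVELS) {
  NAMES[LEVELS[name]] = name
}

// number or name in, number out; falls back to defaultLevel when invalid
const resolve = (level, defaultLevel) => {
  const type = typeof level
  if (type === 'number') {
    if (level in NAMES) {
      return level
    }
  } else if (type === 'string') {
    const nLevel = LEVELS[level.toUpperCase()]
    if (nLevel !== undefined) {
      return nLevel
    }
  }
  return defaultLevel
}

resolve('warn', LEVELS.INFO) // → 4
resolve('nonsense', LEVELS.INFO) // → 3 (the default)
```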

View File

@@ -1,23 +1,26 @@
import LEVELS, { NAMES } from '../levels'
// Bind console methods (necessary for browsers)
/* eslint-disable no-console */
const debugConsole = console.log.bind(console)
const infoConsole = console.info.bind(console)
const warnConsole = console.warn.bind(console)
const errorConsole = console.error.bind(console)
/* eslint-enable no-console */
const { ERROR, INFO, WARN } = LEVELS
const consoleTransport = ({ data, level, namespace, message, time }) => {
const fn =
/* eslint-disable no-console */
level < INFO
? console.log
? debugConsole
: level < WARN
? console.info
? infoConsole
: level < ERROR
? console.warn
: console.error
/* eslint-enable no-console */
? warnConsole
: errorConsole
const args = [time.toISOString(), namespace, NAMES[level], message]
if (data != null) {
args.push(data)
}
fn.apply(console, args)
fn('%s - %s - [%s] %s', time.toISOString(), namespace, NAMES[level], message)
data != null && fn(data)
}
export default () => consoleTransport

View File

@@ -36,8 +36,8 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-dev": "^1.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,62 +0,0 @@
# @xen-orchestra/template [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/template):
```
> npm install --save @xen-orchestra/template
```
## Usage
Create a string replacer based on a pattern and a list of rules.
```js
const myReplacer = compileTemplate('{name}_COPY_\{name}_{id}_%\%', {
'{name}': vm => vm.name_label,
'{id}': vm => vm.id,
'%': (_, i) => i
})
const newString = myReplacer({
name_label: 'foo',
id: 42,
}, 32)
newString === 'foo_COPY_{name}_42_32%' // true
```
## Development
```
# Install dependencies
> yarn
# Run the tests
> yarn test
# Continuously compile
> yarn dev
# Continuously run the tests
> yarn dev-test
# Build for production (automatically called by npm install)
> yarn build
```
## Contributions
Contributions are *very* welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)

View File

@@ -1,19 +0,0 @@
import escapeRegExp from 'lodash/escapeRegExp'
const compareLengthDesc = (a, b) => b.length - a.length
export function compileTemplate(pattern, rules) {
const matches = Object.keys(rules)
.sort(compareLengthDesc)
.map(escapeRegExp)
.join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>
pattern.replace(regExp, match => {
if (match[0] === '\\') {
return match.slice(1)
}
const rule = rules[match]
return typeof rule === 'function' ? rule(...params) : rule
})
}

View File

@@ -1,14 +0,0 @@
/* eslint-env jest */
import { compileTemplate } from '.'
it("correctly replaces the template's variables", () => {
const replacer = compileTemplate(
'{property}_\\{property}_\\\\{property}_{constant}_%_FOO',
{
'{property}': obj => obj.name,
'{constant}': 1235,
'%': (_, i) => i,
}
)
expect(replacer({ name: 'bar' }, 5)).toBe('bar_{property}_\\bar_1235_5_FOO')
})

View File

@@ -8,123 +8,13 @@
### Released packages
## **5.39.1** (2019-10-11)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
### Bug fixes
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
### Released packages
- xo-web v5.50.3
## **5.39.0** (2019-09-30)
### Highlights
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
- [SDN Controller] Add possibility to encrypt private networks (PR [#4441](https://github.com/vatesfr/xen-orchestra/pull/4441))
- [Backups] Improve performance by caching VM backups listing (PR [#4509](https://github.com/vatesfr/xen-orchestra/pull/4509))
- [HUB] VM template store [#1918](https://github.com/vatesfr/xen-orchestra/issues/1918) (PR [#4442](https://github.com/vatesfr/xen-orchestra/pull/4442))
### Enhancements
- [SR/new] Clarify address formats [#4450](https://github.com/vatesfr/xen-orchestra/issues/4450) (PR [#4460](https://github.com/vatesfr/xen-orchestra/pull/4460))
- [Backup NG/New] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4411](https://github.com/vatesfr/xen-orchestra/pull/4411))
- [VM/disks] Don't hide disks that are attached to the same VM twice [#4400](https://github.com/vatesfr/xen-orchestra/issues/4400) (PR [#4414](https://github.com/vatesfr/xen-orchestra/pull/4414))
- [SDN Controller] Ability to configure MTU for private networks (PR [#4491](https://github.com/vatesfr/xen-orchestra/pull/4491))
- [VM Export] Filenames are now prefixed with datetime [#4503](https://github.com/vatesfr/xen-orchestra/issues/4503)
- [Settings/Logs] Differentiate XS/XCP-ng errors from XO errors [#4101](https://github.com/vatesfr/xen-orchestra/issues/4101) (PR [#4385](https://github.com/vatesfr/xen-orchestra/pull/4385))
- [Backups] Improve performance by caching logs consolidation (PR [#4541](https://github.com/vatesfr/xen-orchestra/pull/4541))
- [New VM] Cloud Init available for all plans (PR [#4543](https://github.com/vatesfr/xen-orchestra/pull/4543))
- [Servers] IPv6 addresses can be used [#4520](https://github.com/vatesfr/xen-orchestra/issues/4520) (PR [#4521](https://github.com/vatesfr/xen-orchestra/pull/4521)) \
Note: They must be enclosed in brackets to differentiate them from the port, e.g.: `[2001:db8::7334]` or `[2001:db8::7334]:4343`
### Bug fixes
- [PBD] Obfuscate cifs password from device config [#4384](https://github.com/vatesfr/xen-orchestra/issues/4384) (PR [#4401](https://github.com/vatesfr/xen-orchestra/pull/4401))
- [XOSAN] Fix "invalid parameters" error on creating a SR (PR [#4478](https://github.com/vatesfr/xen-orchestra/pull/4478))
- [Patching] Avoid overloading XCP-ng by reducing the frequency of yum update checks [#4358](https://github.com/vatesfr/xen-orchestra/issues/4358) (PR [#4477](https://github.com/vatesfr/xen-orchestra/pull/4477))
- [Network] Fix inability to create a bonded network (PR [#4489](https://github.com/vatesfr/xen-orchestra/pull/4489))
- [Backup restore & Replication] Don't copy `sm_config` to new VDIs which might lead to useless coalesces [#4482](https://github.com/vatesfr/xen-orchestra/issues/4482) (PR [#4484](https://github.com/vatesfr/xen-orchestra/pull/4484))
- [Home] Fix intermediary "no results" display shown when filtering items [#4420](https://github.com/vatesfr/xen-orchestra/issues/4420) (PR [#4456](https://github.com/vatesfr/xen-orchestra/pull/4456))
- [Backup NG/New schedule] Properly show user errors in the form [#3831](https://github.com/vatesfr/xen-orchestra/issues/3831) (PR [#4131](https://github.com/vatesfr/xen-orchestra/pull/4131))
- [VM/Advanced] Fix `"vm.set_domain_type" is not a function` error on switching virtualization mode (PV/HVM) [#4348](https://github.com/vatesfr/xen-orchestra/issues/4348) (PR [#4504](https://github.com/vatesfr/xen-orchestra/pull/4504))
- [Backup NG/logs] Show warning when zstd compression is selected but not supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4375](https://github.com/vatesfr/xen-orchestra/pull/4375))
- [Patches] Fix patches installation for CH 8.0 (PR [#4511](https://github.com/vatesfr/xen-orchestra/pull/4511))
- [Network] Fix inability to set a network name [#4514](https://github.com/vatesfr/xen-orchestra/issues/4514) (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
- [Backup NG] Fix race conditions that could lead to disabled jobs still running (PR [4510](https://github.com/vatesfr/xen-orchestra/pull/4510))
- [XOA] Remove "Updates" and "Licenses" tabs for non admin users (PR [#4526](https://github.com/vatesfr/xen-orchestra/pull/4526))
- [New VM] Ability to escape [cloud config template](https://xen-orchestra.com/blog/xen-orchestra-5-21/#cloudconfigtemplates) variables [#4486](https://github.com/vatesfr/xen-orchestra/issues/4486) (PR [#4501](https://github.com/vatesfr/xen-orchestra/pull/4501))
- [Backup NG] Properly log and report if job is already running [#4497](https://github.com/vatesfr/xen-orchestra/issues/4497) (PR [4534](https://github.com/vatesfr/xen-orchestra/pull/4534))
- [Host] Fix an issue where host was wrongly reporting time inconsistency (PR [#4540](https://github.com/vatesfr/xen-orchestra/pull/4540))
### Released packages
- xen-api v0.27.2
- xo-server-cloud v0.3.0
- @xen-orchestra/cron v1.0.4
- xo-server-sdn-controller v0.3.0
- @xen-orchestra/template v0.1.0
- xo-server v5.50.1
- xo-web v5.50.2
## **5.38.0** (2019-08-29)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Enhancements
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))
- [Zstd]
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
- [VM/Bulk copy] Show warning if zstd compression is not supported on a VM [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PR [#4346](https://github.com/vatesfr/xen-orchestra/pull/4346))
- [VM import & Continuous Replication] Enable `guessVhdSizeOnImport` by default; this fixes some `VDI_IO_ERROR` with XenServer 7.1 and XCP-ng 8.0 (PR [#4436](https://github.com/vatesfr/xen-orchestra/pull/4436))
- [SDN Controller] Add possibility to create multiple GRE networks and VxLAN networks within a same pool (PR [#4435](https://github.com/vatesfr/xen-orchestra/pull/4435))
- [SDN Controller] Add possibility to create cross-pool private networks (PR [#4405](https://github.com/vatesfr/xen-orchestra/pull/4405))
### Bug fixes
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
- [VM/Attach disk] Fix checking VDI mode (PR [#4373](https://github.com/vatesfr/xen-orchestra/pull/4373))
- [VM revert] Snapshot before: add admin ACLs on created snapshot [#4331](https://github.com/vatesfr/xen-orchestra/issues/4331) (PR [#4391](https://github.com/vatesfr/xen-orchestra/pull/4391))
- [Network] Fixed "invalid parameters" error when creating bonded network [#4425](https://github.com/vatesfr/xen-orchestra/issues/4425) (PR [#4429](https://github.com/vatesfr/xen-orchestra/pull/4429))
### Released packages
- xo-server-sdn-controller v0.2.0
- xo-server-usage-report v0.7.3
- xo-server v5.48.0
- xo-web v5.48.1
## **5.37.1** (2019-08-06)
### Enhancements
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
### Bug fixes
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
### Released packages
- xo-server-sdn-controller v0.1.2
- xo-server v5.47.0
- xo-web v5.47.0
## **5.37.0** (2019-07-25)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Highlights
- [Pool] Ability to add multiple hosts on the pool [#2402](https://github.com/vatesfr/xen-orchestra/issues/2402) (PR [#3716](https://github.com/vatesfr/xen-orchestra/pull/3716))
@@ -165,6 +55,8 @@
## **5.36.0** (2019-06-27)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Highlights
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))

View File

@@ -3,29 +3,20 @@
> Keep in mind the changelog is addressed to **users** and should be
> understandable by them.
### Breaking changes
- `xo-server` requires Node 8.
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
- [VM/copy, VM/export] Only show zstd option when it's supported [#3892](https://github.com/vatesfr/xen-orchestra/issues/3892) (PRs [#4326](https://github.com/vatesfr/xen-orchestra/pull/4326) [#4368](https://github.com/vatesfr/xen-orchestra/pull/4368))
- [SDN Controller] Let the user choose on which PIF to create a private network (PR [#4379](https://github.com/vatesfr/xen-orchestra/pull/4379))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
- [SDN Controller] Better detect host shutting down to adapt network topology (PR [#4314](https://github.com/vatesfr/xen-orchestra/pull/4314))
- [SR/General] Display VDI VM name in SR usage graph (PR [#4370](https://github.com/vatesfr/xen-orchestra/pull/4370))
- [SDN Controller] Add new hosts to pool's private networks (PR [#4382](https://github.com/vatesfr/xen-orchestra/pull/4382))
### Released packages
@@ -34,12 +25,7 @@
>
> Rule of thumb: add packages on top.
- @xen-orchestra/cron v1.0.5
- xo-server-transport-icinga2 v0.1.0
- xo-server-sdn-controller v0.3.1
- xo-server v5.51.0
- xo-web v5.51.0
### Dropped packages
- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment
- xo-server-usage-report v0.7.3
- xo-server-sdn-controller v0.1.2
- xo-server v5.47.0
- xo-web v5.47.0

View File

@@ -1,19 +1,14 @@
### Check list
> Check if done.
>
> Strikethrough if not relevant: ~~example~~ ([doc](https://help.github.com/en/articles/basic-writing-and-formatting-syntax)).
> Check items when done or if not relevant
- [ ] PR reference the relevant issue (e.g. `Fixes #007` or `See xoa-support#42`)
- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] `CHANGELOG.unreleased.md`:
- enhancement/bug fix entry added
- list of packages to release updated (`${name} v${new version}`)
- [ ] documentation updated
- `CHANGELOG.unreleased.md`:
- [ ] enhancement/bug fix entry added
- [ ] list of packages to release updated (`${name} v${new version}`)
- **I have tested added/updated features** (and impacted code)
- [ ] unit tests (e.g. [`cron/parse.spec.js`](https://github.com/vatesfr/xen-orchestra/blob/b24400b21de1ebafa1099c56bac1de5c988d9202/%40xen-orchestra/cron/src/parse.spec.js))
- [ ] if `xo-server` API changes, the corresponding test has been added to/updated on [`xo-server-test`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-test)
- [ ] at least manual testing
- [ ] **I have tested added/updated features** (and impacted code)
### Process
@@ -21,10 +16,3 @@
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer (and only one)
1. if necessary, update your PR, and re- add a reviewer
From [_the Four Agreements_](https://en.wikipedia.org/wiki/Don_Miguel_Ruiz#The_Four_Agreements):
1. Be impeccable with your word.
1. Don't take anything personally.
1. Don't make assumptions.
1. Always do your best.

View File

@@ -13,11 +13,11 @@ It aims to be easy to use on any device supporting modern web technologies (HTML
## XOA quick deploy
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
SSH to your XenServer, and execute the following:
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
[![](./assets/deploy_form.png)](https://xen-orchestra.com/#!/xoa)
```
bash -c "$(curl -s http://xoa.io/deploy)"
```
### XOA credentials

View File

@@ -55,7 +55,6 @@
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)
* [Forecaster](forecaster.md)
* [SDN Controller](sdn_controller.md)
* [Recipes](recipes.md)
* [Reverse proxy](reverse_proxy.md)
* [How to contribute?](contributing.md)

Binary file not shown (image; before: 41 KiB)

Binary file not shown (image; before: 99 KiB)

Binary file not shown (image; before: 85 KiB)

View File

@@ -15,6 +15,5 @@ We've made multiple categories to help you to find what you need:
* [Job Manager](scheduler.html)
* [Alerts](alerts.html)
* [Load balancing](load_balancing.html)
* [SDN Controller](sdn_controller.html)
![](./assets/xo5tablet.jpg)

View File

@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
```
$ node -v
v8.16.2
v8.12.0
```
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.

View File

@@ -1,10 +1,14 @@
# Installation
Log in to your account and use the deploy form available on [this page](https://xen-orchestra.com/#!/xoa)
SSH to your XenServer/XCP-ng host and execute the following:
![](./assets/deploy_form.png)
```
bash -c "$(curl -s http://xoa.io/deploy)"
```
## [More on XOA and alternate deploy](xoa.md)
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.
## [More on XOA](xoa.md)
![](https://xen-orchestra.com/assets/xoa1.png)

View File

@@ -1,60 +0,0 @@
# SDN Controller
> SDN Controller is available in XOA 5.44 and higher
The SDN Controller enables a user to **create pool-wide and cross-pool** (since XOA 5.48.1) **private networks**.
![](./assets/sdn-controller.png)
## How does it work?
Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.com/blog/xo-sdn-controller/) and its [extension for cross-pool private networks](https://xen-orchestra.com/blog/devblog-3-extending-the-sdn-controller/).
## Usage
### Network creation
In the network creation view:
- Select a `pool`
- Select `Private network`
- Select an interface on which to create the network's tunnels
- Select the encapsulation: a choice is offered between `GRE` and `VxLAN`, if `VxLAN` is chosen, then port 4789 must be open for UDP traffic on all the network's hosts (see [the requirements](#vxlan))
- Choose if the network should be encrypted or not (see [the requirements](#encryption) to use encryption)
- Select other `pool`s to add them to the network if desired
- For each added `pool`: select an interface on which to create the tunnels
- Create the network
- Have fun! ☺
***NB:***
- All hosts in a private network must be able to reach the other hosts' management interface.
> The term management interface is used to indicate the IP-enabled NIC that carries the management traffic.
- Only 1 encrypted GRE network and 1 encrypted VxLAN network per pool can exist at a time due to Open vSwitch limitation.
### Configuration
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
The plugin's configuration contains:
- `cert-dir`: The path where the plugin will look for the certificates to create SSL connections with the hosts.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Enable to uninstall the existing SDN controller CA certificate in order to replace it with the plugin's one.
## Requirements
### VxLAN
- On XCP-ng prior to 7.6:
- To be able to use `VxLAN`, the following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted: `-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
### Encryption
> Encryption is not available prior to 8.0.
- On XCP-ng 8.0:
- To be able to encrypt the networks, `openvswitch-ipsec` package must be installed on all the hosts:
- `yum install openvswitch-ipsec --enablerepo=xcp-ng-testing`
- `systemctl enable ipsec`
- `systemctl enable openvswitch-ipsec`
- `systemctl start ipsec`
- `systemctl start openvswitch-ipsec`

View File

@@ -110,17 +110,16 @@ $ systemctl restart xo-server
### Behind a transparent proxy
If you're behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
If your are behind a transparent proxy, you'll probably have issues with the updater (SSL/TLS issues).
Run the following commands to allow the updater to work:
First, run the following commands:
```
$ sudo -s
$ echo NODE_TLS_REJECT_UNAUTHORIZED=0 >> /etc/xo-appliance/env
$ npm config -g set strict-ssl=false
$ systemctl restart xoa-updater
```
Now try running an update again.
Then, restart the updater with `systemctl restart xoa-updater`.
### Updating SSL self-signed certificate

View File

@@ -41,20 +41,6 @@ However, if you want to start a manual check, you can do it by clicking on the "
![](./assets/xo5updatebutton.png)
#### Release channel
In Xen Orchestra, you can make a choice between two different release channels.
##### Stable
The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).
##### Latest
The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once deployed in vastly varying environments, such as our user base has.
> To select the release channel of your choice, go to the XOA > Updates view.
![](./assets/release-channels.png)
#### Upgrade
If a new version is found, you'll have an upgrade button and its tooltip displayed:

View File

@@ -22,36 +22,26 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R
### The quickest way
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
![](./assets/deploy_form.png)
### Via a bash script
Alternatively, you can deploy it by connecting to your XenServer host and executing the following:
The fastest way to install Xen Orchestra is to use our appliance deploy script. You can deploy it by connecting to your XenServer host and executing the following:
```
bash -c "$(curl -s http://xoa.io/deploy)"
```
**Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.
> **Note:** This won't write or modify anything on your XenServer host: it will just import the XOA VM into your default storage repository.
Follow the instructions:
Now follow the instructions:
* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.
### Via download the XVA
### The alternative
Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
## First Login
Once you have started the VM, you can access the web UI by putting the IP you configured during deployment into your web browser. If you did not configure an IP or are unsure, try one of the following methods to find it:
* Run `xe vm-list params=name-label,networks | grep -A 1 XOA` on your host

View File

@@ -8,22 +8,22 @@
"benchmark": "^2.1.4",
"eslint": "^6.0.1",
"eslint-config-prettier": "^6.0.0",
"eslint-config-standard": "14.1.0",
"eslint-config-standard-jsx": "^8.1.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^10.0.0",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.109.0",
"flow-bin": "^0.102.0",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"sorted-object": "^2.0.1"
},
"engines": {
@@ -46,7 +46,6 @@
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"timers": "fake",
"transform": {
"\\.jsx?$": "babel-jest"
}

View File

@@ -35,8 +35,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.1",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -33,8 +33,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -28,7 +28,7 @@
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.1",
"cli-progress": "^3.1.0",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
@@ -39,11 +39,11 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.14.0",
"rimraf": "^3.0.0",
"promise-toolbox": "^0.13.0",
"rimraf": "^2.6.1",
"tmp": "^0.1.0"
},
"scripts": {

View File

@@ -21,13 +21,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -38,13 +37,13 @@
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^3.0.0",
"rimraf": "^2.6.2",
"tmp": "^0.1.0"
},
"scripts": {

View File

@@ -1,5 +1,4 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
@@ -14,23 +13,18 @@ import {
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const { warn } = createLogger('vhd-lib:createSyntheticStream')
export default async function createSyntheticStream(handler, paths) {
export default async function createSyntheticStream(handler, path) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
console.warn('createReadStream, closeFd', i, error)
})
}
}
try {
const vhds = []
const open = async path => {
while (true) {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
@@ -38,18 +32,11 @@ export default async function createSyntheticStream(handler, paths) {
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
return vhd
}
if (typeof paths === 'string') {
let path = paths
let vhd
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
} else {
for (const path of paths) {
await open(path)
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
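
For context on what the chain walk in this hunk does, a standalone sketch that reuses only calls already visible in this diff (in the deleted `@xen-orchestra/backups-cli` script earlier): starting from a child VHD, follow each differencing disk's parent reference until the base disk is reached. The helper name and path are hypothetical.

```js
// walk a VHD chain from child to base disk, using the same vhd-lib calls as
// the backups-cli script shown earlier in this diff
const { dirname, resolve } = require('path')
const { default: Vhd } = require('vhd-lib')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })

async function listVhdChain(path) {
  const chain = []
  while (true) {
    const vhd = new Vhd(handler, path)
    await vhd.readHeaderAndFooter()
    chain.push(path)
    if (vhd.footer.diskType !== DISK_TYPE_DIFFERENCING) {
      // dynamic (base) disk: end of the chain
      return chain
    }
    // differencing disks reference their parent relative to their own path
    path = resolve(dirname(path), vhd.header.parentUnicodeName)
  }
}

// hypothetical usage:
// listVhdChain('/path/to/child.vhd').then(chain => console.log(chain))
```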

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
@@ -16,7 +15,10 @@ import {
SECTOR_SIZE,
} from './_constants'
const { debug } = createLogger('vhd-lib:Vhd')
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//

View File

@@ -41,15 +41,15 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.27.2"
"xen-api": "^0.27.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
"@babel/core": "^7.1.5",
"@babel/preset-env": "^7.1.5",
"babel-plugin-lodash": "^3.2.11",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.4",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,189 +0,0 @@
{
"requires": true,
"lockfileVersion": 1,
"dependencies": {
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
"integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac="
},
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"event-loop-delay": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/event-loop-delay/-/event-loop-delay-1.0.0.tgz",
"integrity": "sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==",
"requires": {
"napi-macros": "^1.8.2",
"node-gyp-build": "^3.7.0"
}
},
"getopts": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
"integrity": "sha512-9jb7AW5p3in+IiJWhQiZmmwkpLaR/ccTWdWQCtZM66HJcHHLegowh4q4tSD7gouUyeNvFWRavfK9GXosQHDpFA=="
},
"golike-defer": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
},
"human-format": {
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
"integrity": "sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg=="
},
"inherits": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"isarray": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"make-error": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
"integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g=="
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
},
"napi-macros": {
"version": "1.8.2",
"resolved": "https://registry.npmjs.org/napi-macros/-/napi-macros-1.8.2.tgz",
"integrity": "sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg=="
},
"node-gyp-build": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-3.9.0.tgz",
"integrity": "sha512-zLcTg6P4AbcHPq465ZMFNXx7XpKKJh+7kkN699NiQWisR2uWYOWNWqRHAmbnmKiL4e9aLSlmy5U7rEMUXV59+A=="
},
"prettier-bytes": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/prettier-bytes/-/prettier-bytes-1.0.4.tgz",
"integrity": "sha1-mUsCqkb2mcULYle1+qp/4lV+YtY="
},
"process-nextick-args": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
"integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
},
"process-top": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/process-top/-/process-top-1.0.0.tgz",
"integrity": "sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==",
"requires": {
"event-loop-delay": "^1.0.0",
"prettier-bytes": "^1.0.4"
}
},
"progress-stream": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/progress-stream/-/progress-stream-2.0.0.tgz",
"integrity": "sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=",
"requires": {
"speedometer": "~1.0.0",
"through2": "~2.0.3"
}
},
"promise-toolbox": {
"version": "0.13.0",
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.13.0.tgz",
"integrity": "sha512-Z6u7EL9/QyY1zZqeqpEiKS7ygKwZyl0JL0ouno/en6vMliZZc4AmM0aFCrDAVxEyKqj2f3SpkW0lXEfAZsNWiQ==",
"requires": {
"make-error": "^1.3.2"
}
},
"readable-stream": {
"version": "3.4.0",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.4.0.tgz",
"integrity": "sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ==",
"requires": {
"inherits": "^2.0.3",
"string_decoder": "^1.1.1",
"util-deprecate": "^1.0.1"
}
},
"safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"speedometer": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/speedometer/-/speedometer-1.0.0.tgz",
"integrity": "sha1-zWccsGdSwivKM3Di8zREC+T8YuI="
},
"stream-parser": {
"version": "0.3.1",
"resolved": "https://registry.npmjs.org/stream-parser/-/stream-parser-0.3.1.tgz",
"integrity": "sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=",
"requires": {
"debug": "2"
}
},
"string_decoder": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
"integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
"requires": {
"safe-buffer": "~5.1.0"
}
},
"throttle": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
"integrity": "sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=",
"requires": {
"readable-stream": ">= 0.3.0",
"stream-parser": ">= 0.0.2"
}
},
"through2": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
"integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
"requires": {
"readable-stream": "~2.3.6",
"xtend": "~4.0.1"
},
"dependencies": {
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
}
}
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
"xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
"integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
}
}
}

View File

@@ -5,7 +5,7 @@
"human-format": "^0.10.1",
"process-top": "^1.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.11.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3"
}

View File

@@ -0,0 +1,179 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
core-util-is@~1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
debug@2:
version "2.6.9"
resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f"
integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==
dependencies:
ms "2.0.0"
event-loop-delay@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/event-loop-delay/-/event-loop-delay-1.0.0.tgz#5af6282549494fd0d868c499cbdd33e027978b8c"
integrity sha512-8YtyeIWHXrvTqlAhv+fmtaGGARmgStbvocERYzrZ3pwhnQULe5PuvMUTjIWw/emxssoaftfHZsJtkeY8xjiXCg==
dependencies:
napi-macros "^1.8.2"
node-gyp-build "^3.7.0"
getopts@^2.2.3:
version "2.2.3"
resolved "https://registry.yarnpkg.com/getopts/-/getopts-2.2.3.tgz#11d229775e2ec2067ed8be6fcc39d9b4bf39cf7d"
integrity sha512-viEcb8TpgeG05+Nqo5EzZ8QR0hxdyrYDp6ZSTZqe2M/h53Bk036NmqG38Vhf5RGirC/Of9Xql+v66B2gp256SQ==
golike-defer@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"
human-format@^0.10.1:
version "0.10.1"
resolved "https://registry.yarnpkg.com/human-format/-/human-format-0.10.1.tgz#107793f355912e256148d5b5dcf66a0230187ee9"
integrity sha512-UzCHToSw3HI9MxH9tYzMr1JbHJbgzr6o0hZCun7sruv59S1leps21bmgpBkkwEvQon5n/2OWKH1iU7BEko02cg==
inherits@^2.0.3, inherits@~2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de"
integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=
isarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11"
integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=
make-error@^1.3.2:
version "1.3.5"
resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.5.tgz#efe4e81f6db28cadd605c70f29c831b58ef776c8"
integrity sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==
ms@2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8"
integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=
napi-macros@^1.8.2:
version "1.8.2"
resolved "https://registry.yarnpkg.com/napi-macros/-/napi-macros-1.8.2.tgz#299265c1d8aa401351ad0675107d751228c03eda"
integrity sha512-Tr0DNY4RzTaBG2W2m3l7ZtFuJChTH6VZhXVhkGGjF/4cZTt+i8GcM9ozD+30Lmr4mDoZ5Xx34t2o4GJqYWDGcg==
node-gyp-build@^3.7.0:
version "3.7.0"
resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.7.0.tgz#daa77a4f547b9aed3e2aac779eaf151afd60ec8d"
integrity sha512-L/Eg02Epx6Si2NXmedx+Okg+4UHqmaf3TNcxd50SF9NQGcJaON3AtU++kax69XV7YWz4tUspqZSAsVofhFKG2w==
prettier-bytes@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/prettier-bytes/-/prettier-bytes-1.0.4.tgz#994b02aa46f699c50b6257b5faaa7fe2557e62d6"
integrity sha1-mUsCqkb2mcULYle1+qp/4lV+YtY=
process-nextick-args@~2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.0.tgz#a37d732f4271b4ab1ad070d35508e8290788ffaa"
integrity sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==
process-top@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/process-top/-/process-top-1.0.0.tgz#52892bedb581c5abf0df2d0aa5c429e34275cc7e"
integrity sha512-er8iSmBMslOt5cgIHg9m6zilTPsuUqpEb1yfQ4bDmO80zr/e/5hNn+Tay3CJM/FOBnJo8Bt3fFiDDH6GvIgeAg==
dependencies:
event-loop-delay "^1.0.0"
prettier-bytes "^1.0.4"
progress-stream@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/progress-stream/-/progress-stream-2.0.0.tgz#fac63a0b3d11deacbb0969abcc93b214bce19ed5"
integrity sha1-+sY6Cz0R3qy7CWmrzJOyFLzhntU=
dependencies:
speedometer "~1.0.0"
through2 "~2.0.3"
promise-toolbox@^0.11.0:
version "0.11.0"
resolved "https://registry.yarnpkg.com/promise-toolbox/-/promise-toolbox-0.11.0.tgz#9ed928355355395072dace3f879879504e07d1bc"
integrity sha512-bjHk0kq+Ke3J3zbkbbJH6kXCyQZbFHwOTrE/Et7vS0uS0tluoV+PLqU/kEyxl8aARM7v04y2wFoDo/wWAEPvjA==
dependencies:
make-error "^1.3.2"
"readable-stream@>= 0.3.0", readable-stream@^3.1.1:
version "3.1.1"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.1.1.tgz#ed6bbc6c5ba58b090039ff18ce670515795aeb06"
integrity sha512-DkN66hPyqDhnIQ6Jcsvx9bFjhw214O4poMBcIMgPVpQvNy9a0e0Uhg5SqySyDKAmUlwt8LonTBz1ezOnM8pUdA==
dependencies:
inherits "^2.0.3"
string_decoder "^1.1.1"
util-deprecate "^1.0.1"
readable-stream@~2.3.6:
version "2.3.6"
resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf"
integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==
dependencies:
core-util-is "~1.0.0"
inherits "~2.0.3"
isarray "~1.0.0"
process-nextick-args "~2.0.0"
safe-buffer "~5.1.1"
string_decoder "~1.1.1"
util-deprecate "~1.0.1"
safe-buffer@~5.1.0, safe-buffer@~5.1.1:
version "5.1.2"
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d"
integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==
speedometer@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/speedometer/-/speedometer-1.0.0.tgz#cd671cb06752c22bca3370e2f334440be4fc62e2"
integrity sha1-zWccsGdSwivKM3Di8zREC+T8YuI=
"stream-parser@>= 0.0.2":
version "0.3.1"
resolved "https://registry.yarnpkg.com/stream-parser/-/stream-parser-0.3.1.tgz#1618548694420021a1182ff0af1911c129761773"
integrity sha1-FhhUhpRCACGhGC/wrxkRwSl2F3M=
dependencies:
debug "2"
string_decoder@^1.1.1:
version "1.2.0"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.2.0.tgz#fe86e738b19544afe70469243b2a1ee9240eae8d"
integrity sha512-6YqyX6ZWEYguAxgZzHGL7SsCeGx3V2TtOTqZz1xSTSWnqsbWwbptafNyvf/ACquZUXV3DANr5BDIwNYe1mN42w==
dependencies:
safe-buffer "~5.1.0"
string_decoder@~1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8"
integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==
dependencies:
safe-buffer "~5.1.0"
throttle@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/throttle/-/throttle-1.0.3.tgz#8a32e4a15f1763d997948317c5ebe3ad8a41e4b7"
integrity sha1-ijLkoV8XY9mXlIMXxevjrYpB5Lc=
dependencies:
readable-stream ">= 0.3.0"
stream-parser ">= 0.0.2"
through2@~2.0.3:
version "2.0.5"
resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd"
integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==
dependencies:
readable-stream "~2.3.6"
xtend "~4.0.1"
util-deprecate@^1.0.1, util-deprecate@~1.0.1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf"
integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=
xtend@~4.0.1:
version "4.0.1"
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.1.tgz#a5c6d532be656e23db820efb943a1f04998d63af"
integrity sha1-pcbVMr5lbiPbgg77lDofBJmNY68=

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.27.2",
"version": "0.27.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -46,7 +46,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -60,8 +60,8 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,4 +1,4 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?(?:\[([^\]]+)\]|([^:/]+))(?::([0-9]+))?\/?$/
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/
export default url => {
const matches = URL_RE.exec(url)
@@ -6,15 +6,7 @@ export default url => {
throw new Error('invalid URL: ' + url)
}
const [
,
protocol = 'https:',
username,
password,
ipv6,
hostname = ipv6,
port,
] = matches
const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)

View File

@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find } from 'lodash'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
@@ -110,7 +110,7 @@ const main = async args => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
}).then(value => (isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)

View File

@@ -4,7 +4,7 @@ import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
import { isArray, map, noop, omit } from 'lodash'
import {
cancelable,
defer,
@@ -113,7 +113,7 @@ export class Xapi extends EventEmitter {
this._watchedTypes = undefined
const { watchEvents } = opts
if (watchEvents !== false) {
if (Array.isArray(watchEvents)) {
if (isArray(watchEvents)) {
this._watchedTypes = watchEvents
}
this.watchEvents()
@@ -1075,7 +1075,7 @@ export class Xapi extends EventEmitter {
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
const value = data[field]
if (Array.isArray(value)) {
if (isArray(value)) {
if (value.length === 0 || isOpaqueRef(value[0])) {
getters[$field] = function() {
const value = this[field]

View File

@@ -38,16 +38,16 @@
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"micromatch": "^3.1.3",
"mkdirp": "^0.5.1",
"nice-pipe": "0.0.0",
"pretty-ms": "^5.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"strip-indent": "^2.0.0",
"xdg-basedir": "^3.0.0",
"xo-lib": "^0.9.0"
},
"devDependencies": {
@@ -56,8 +56,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -7,6 +7,7 @@ const promisify = require('bluebird').promisify
const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)
const assign = require('lodash/assign')
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const xdgBasedir = require('xdg-basedir')
@@ -40,7 +41,7 @@ const save = (exports.save = function(config) {
exports.set = function(data) {
return load().then(function(config) {
return save(Object.assign(config, data))
return save(assign(config, data))
})
}

View File

@@ -17,6 +17,7 @@ const getKeys = require('lodash/keys')
const hrp = require('http-request-plus').default
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isArray = require('lodash/isArray')
const isObject = require('lodash/isObject')
const micromatch = require('micromatch')
const nicePipe = require('nice-pipe')
@@ -198,18 +199,7 @@ function main(args) {
return exports[fnName](args.slice(1))
}
return exports.call(args).catch(error => {
if (!(error != null && error.code === 10 && 'errors' in error.data)) {
throw error
}
const lines = [error.message]
const { errors } = error.data
errors.forEach(error => {
lines.push(` property ${error.property}: ${error.message}`)
})
throw lines.join('\n')
})
return exports.call(args)
}
exports = module.exports = main
@@ -297,11 +287,7 @@ async function listCommands(args) {
str.push(
name,
'=<',
type == null
? 'unknown type'
: Array.isArray(type)
? type.join('|')
: type,
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
'>'
)

View File

@@ -34,9 +34,9 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -36,8 +36,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,5 +1,5 @@
import { BaseError } from 'make-error'
import { iteratee } from 'lodash'
import { isArray, iteratee } from 'lodash'
class XoError extends BaseError {
constructor({ code, message, data }) {
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
}))
export const invalidParameters = create(10, (message, errors) => {
if (Array.isArray(message)) {
if (isArray(message)) {
errors = message
message = undefined
}

View File

@@ -41,8 +41,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -32,9 +32,9 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"deep-freeze": "^0.0.1",
"rimraf": "^3.0.0"
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -40,8 +40,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -36,18 +36,18 @@
"dependencies": {
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"inquirer": "^7.0.0",
"inquirer": "^6.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -40,8 +40,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.6.1",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -36,7 +36,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/log": "^0.1.4",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -48,8 +48,8 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -354,7 +354,7 @@ class BackupReportsXoPlugin {
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
@@ -390,7 +390,7 @@ class BackupReportsXoPlugin {
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
@@ -646,7 +646,7 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
@@ -656,7 +656,7 @@ class BackupReportsXoPlugin {
})
}
_sendReport({ markdown, subject, success, nagiosMarkdown }) {
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
@@ -676,14 +676,9 @@ class BackupReportsXoPlugin {
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: success ? 0 : 2,
status: nagiosStatus,
message: nagiosMarkdown,
}),
xo.sendIcinga2Status !== undefined &&
xo.sendIcinga2Status({
status: success ? 'OK' : 'CRITICAL',
message: markdown,
}),
])
}
@@ -713,7 +708,7 @@ class BackupReportsXoPlugin {
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}
@@ -909,7 +904,7 @@ class BackupReportsXoPlugin {
? ICON_FAILURE
: ICON_SKIPPED
}`,
success: globalSuccess,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${

View File

@@ -0,0 +1,10 @@
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/test/
/tests/
*.spec.js
*.spec.js.map

View File

@@ -1,6 +1,4 @@
# xo-server-transport-icinga2 [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
> xo-server plugin to send status to icinga2 server
# xo-server-cloud [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
## Install
@@ -13,13 +11,6 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs
## Development
### `Xo#sendIcinga2Status({ status, message })`
This xo method is called to send a passive check to icinga2 and change the status of a service.
It has two parameters:
- status: the service status in icinga2 (0: OK | 1: WARNING | 2: CRITICAL | 3: UNKNOWN).
- message: the status information in icinga2.
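For illustration only (not part of this diff), here is a minimal sketch of how a plugin could call this method, mirroring the way `xo-server-backup-reports` invokes it elsewhere in this compare; the `undefined` guard is needed because the method only exists while the icinga2 transport plugin is loaded:

```js
// Hypothetical helper (name is illustrative): report a backup result to icinga2.
// `xo` is the server instance passed to the plugin; `success` and `markdown`
// are values computed by the calling plugin.
async function reportToIcinga2(xo, success, markdown) {
  if (xo.sendIcinga2Status !== undefined) {
    await xo.sendIcinga2Status({
      status: success ? 'OK' : 'CRITICAL',
      message: markdown,
    })
  }
}
```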
```
# Install dependencies
> npm install

View File

@@ -1,35 +1,46 @@
{
"name": "@xen-orchestra/template",
"version": "0.1.0",
"name": "xo-server-cloud",
"version": "0.2.4",
"license": "ISC",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/template",
"description": "",
"keywords": [
"cloud",
"orchestra",
"plugin",
"xen",
"xen-orchestra",
"xo-server"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/template",
"directory": "packages/xo-server-cloud",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Julien Fontanet",
"email": "julien.fontanet@vates.fr"
"name": "Pierre Donias",
"email": "pierre.donias@gmail.com"
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.5.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.5.4"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -37,10 +48,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish --access public"
"prepublishOnly": "yarn run build"
},
"dependencies": {
"lodash": "^4.17.15"
}
"private": true
}

View File

@@ -0,0 +1,158 @@
import Client, { createBackoff } from 'jsonrpc-websocket-client'
import hrp from 'http-request-plus'
const WS_URL = 'ws://localhost:9001'
const HTTP_URL = 'http://localhost:9002'
// ===================================================================
class XoServerCloud {
constructor({ xo }) {
this._xo = xo
// Defined in configure().
this._conf = null
this._key = null
}
configure(configuration) {
this._conf = configuration
}
async load() {
const getResourceCatalog = () => this._getCatalog()
getResourceCatalog.description = 'Get the list of all available resources'
getResourceCatalog.permission = 'admin'
const registerResource = ({ namespace }) =>
this._registerResource(namespace)
registerResource.description = 'Register a resource via cloud plugin'
registerResource.params = {
namespace: {
type: 'string',
},
}
registerResource.permission = 'admin'
this._unsetApiMethods = this._xo.addApiMethods({
cloud: {
getResourceCatalog,
registerResource,
},
})
this._unsetRequestResource = this._xo.defineProperty(
'requestResource',
this._requestResource,
this
)
const updater = (this._updater = new Client(WS_URL))
const connect = () =>
updater.open(createBackoff()).catch(error => {
console.error('xo-server-cloud: fail to connect to updater', error)
return connect()
})
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
console.warn('xo-server-cloud: next attempt in %s ms', delay)
})
connect()
}
unload() {
this._unsetApiMethods()
this._unsetRequestResource()
}
// ----------------------------------------------------------------
async _getCatalog() {
const catalog = await this._updater.call('getResourceCatalog')
if (!catalog) {
throw new Error('cannot get catalog')
}
return catalog
}
// ----------------------------------------------------------------
async _getNamespaces() {
const catalog = await this._getCatalog()
if (!catalog._namespaces) {
throw new Error('cannot get namespaces')
}
return catalog._namespaces
}
// ----------------------------------------------------------------
async _registerResource(namespace) {
const _namespace = (await this._getNamespaces())[namespace]
if (_namespace === undefined) {
throw new Error(`${namespace} is not available`)
}
if (_namespace.registered || _namespace.pending) {
throw new Error(`already registered for ${namespace}`)
}
return this._updater.call('registerResource', { namespace })
}
// ----------------------------------------------------------------
async _getNamespaceCatalog(namespace) {
const namespaceCatalog = (await this._getCatalog())[namespace]
if (!namespaceCatalog) {
throw new Error(`cannot get catalog: ${namespace} not registered`)
}
return namespaceCatalog
}
// ----------------------------------------------------------------
async _requestResource(namespace, id, version) {
const _namespace = (await this._getNamespaces())[namespace]
if (!_namespace || !_namespace.registered) {
throw new Error(`cannot get resource: ${namespace} not registered`)
}
const { _token: token } = await this._getNamespaceCatalog(namespace)
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
if (token === undefined) {
throw new Error(`${namespace} namespace token is undefined`)
}
const downloadToken = await this._updater.call('getResourceDownloadToken', {
token,
id,
version,
})
if (!downloadToken) {
throw new Error('cannot get download token')
}
const response = await hrp(HTTP_URL, {
headers: {
Authorization: `Bearer ${downloadToken}`,
},
})
// currently needed for XenApi#putResource()
response.length = response.headers['content-length']
return response
}
}
export default opts => new XoServerCloud(opts)

View File

@@ -31,7 +31,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.5",
"@xen-orchestra/cron": "^1.0.3",
"lodash": "^4.16.2"
},
"devDependencies": {

View File

@@ -21,7 +21,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.5",
"@xen-orchestra/cron": "^1.0.3",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"
@@ -32,8 +32,8 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,6 +1,6 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const COMPARATOR_FN = {
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
Object.assign(result, {
assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
definition.alarmTriggerLevel
)
const data = getter(result.object)
Object.assign(result, {
assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
@@ -680,7 +680,7 @@ ${entry.listItem}
},
}
if (xapiObject.$type === 'VM') {
payload.vm_uuid = xapiObject.uuid
payload['vm_uuid'] = xapiObject.uuid
}
// JSON is not well formed, can't use the default node parser
return JSON5.parse(

View File

@@ -1,14 +1,31 @@
# xo-server-sdn-controller [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
XO Server plugin that allows the creation of pool-wide and cross-pool private networks.
XO Server plugin that allows the creation of pool-wide private networks.
## Install
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Documentation
## Usage
Please see the plugin's [official documentation](https://xen-orchestra.com/docs/sdn_controller.html).
### Network creation
In the network creation view, select a `pool` and `Private network`.
Create the network.
A choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, then port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, to `/etc/sysconfig/iptables` on all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
### Configuration
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
The plugin's configuration contains:
- `cert-dir`: The path where the certificates used to create SSL connections with the hosts can be found.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it with the plugin's own.
## Contributions

View File

@@ -15,24 +15,22 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.3.1",
"version": "0.1.1",
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^6.0.3"
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/log": "^0.1.4",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.98",
"promise-toolbox": "^0.14.0",
"uuid": "^3.3.2"
"node-openssl-cert": "^0.0.84",
"promise-toolbox": "^0.13.0"
},
"private": true
}

File diff suppressed because it is too large

View File

@@ -1,8 +1,8 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import forOwn from 'lodash/forOwn'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
import { forOwn, toPairs } from 'lodash'
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
@@ -10,35 +10,10 @@ const OVSDB_PORT = 6640
// =============================================================================
function toMap(object) {
return ['map', toPairs(object)]
}
// =============================================================================
export class OvsdbClient {
/*
Create an SSL connection to an XCP-ng host.
Interact with the host's OpenVSwitch (OVS) daemon to create and manage the virtual bridges
corresponding to the private networks with OVSDB (OpenVSwitch DataBase) Protocol.
See:
- OVSDB Protocol: https://tools.ietf.org/html/rfc7047
- OVS Tunneling : http://docs.openvswitch.org/en/latest/howto/tunneling/
- OVS IPSEC : http://docs.openvswitch.org/en/latest/howto/ipsec/
Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
- `other_config`:
- `xo:sdn-controller:private-network-uuid`: UUID of the private network
Attributes on created OVS interfaces:
- `options`:
- `key` : Network's VNI
- `remote_ip`: Remote IP of the tunnel
*/
constructor(host, clientKey, clientCert, caCert) {
this._numberOfPortAndInterface = 0
this._requestId = 0
this._requestID = 0
this._adding = []
@@ -66,83 +41,78 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
network,
networkUuid,
networkName,
remoteAddress,
encapsulation,
key,
password,
privateNetworkUuid
encapsulation
) {
if (
this._adding.find(
elem => elem.id === network.uuid && elem.addr === remoteAddress
elem => elem.id === networkUuid && elem.addr === remoteAddress
) !== undefined
) {
return
}
const adding = { id: network.uuid, addr: remoteAddress }
const adding = { id: networkUuid, addr: remoteAddress }
this._adding.push(adding)
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const index = this._numberOfPortAndInterface
++this._numberOfPortAndInterface
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridge,
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
return bridge.name
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
return bridgeName
}
const index = ++this._numberOfPortAndInterface
const interfaceName = bridge.name + '_iface' + index
const portName = bridge.name + '_port' + index
const interfaceName = 'tunnel_iface' + index
const portName = 'tunnel_port' + index
// Add interface and port to the bridge
const options = { remote_ip: remoteAddress, key: key }
if (password !== undefined) {
options.psk = password
}
const options = ['map', [['remote_ip', remoteAddress]]]
const addInterfaceOperation = {
op: 'insert',
table: 'Interface',
row: {
type: encapsulation,
options: toMap(options),
options: options,
name: interfaceName,
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_iface',
}
const addPortOperation = {
op: 'insert',
table: 'Port',
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: toMap({
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
}),
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_port',
}
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
@@ -153,9 +123,7 @@ export class OvsdbClient {
]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
)
this._adding = this._adding.slice(this._adding.indexOf(adding), 1)
if (jsonObjects === undefined) {
socket.destroy()
return
@@ -167,7 +135,7 @@ export class OvsdbClient {
let opResult
do {
opResult = jsonObjects[0].result[i]
if (opResult?.error !== undefined) {
if (opResult !== undefined && opResult.error !== undefined) {
error = opResult.error
details = opResult.details
}
@@ -180,8 +148,8 @@ export class OvsdbClient {
details,
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
@@ -191,24 +159,28 @@ export class OvsdbClient {
log.debug('Port and interface added to bridge', {
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
return bridge.name
return bridgeName
}
async resetForNetwork(network, privateNetworkUuid) {
async resetForNetwork(networkUuid, networkName) {
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridge, socket)
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
socket.destroy()
return
@@ -229,19 +201,7 @@ export class OvsdbClient {
}
forOwn(selectResult.other_config[1], config => {
// 2019-09-03
// Compatibility code, to be removed in 1 year.
const oldShouldDelete =
config[0] === 'private_pool_wide' ||
config[0] === 'cross_pool' ||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
config[0] === 'xo:sdn-controller:cross-pool'
const shouldDelete =
config[0] === 'xo:sdn-controller:private-network-uuid' &&
config[1] === privateNetworkUuid
if (shouldDelete || oldShouldDelete) {
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
portsToDelete.push(['uuid', portUuid])
}
})
@@ -256,7 +216,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
@@ -269,7 +229,7 @@ export class OvsdbClient {
if (jsonObjects[0].error != null) {
log.error('Error while deleting ports from bridge', {
error: jsonObjects[0].error,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -278,7 +238,7 @@ export class OvsdbClient {
log.debug('Ports deleted from bridge', {
nPorts: jsonObjects[0].result[0].count,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -296,9 +256,9 @@ export class OvsdbClient {
for (let i = pos; i < data.length; ++i) {
const c = data.charAt(i)
if (c === '{') {
++depth
depth++
} else if (c === '}') {
--depth
depth--
if (depth === 0) {
const object = JSON.parse(buffer + data.substr(0, i + 1))
objects.push(object)
@@ -316,9 +276,13 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _getBridgeForNetwork(network, socket) {
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
[
'external_ids',
'includes',
['map', [['xs-network-uuids', networkUuid]]],
],
]
const selectResult = await this._select(
'Bridge',
@@ -328,17 +292,25 @@ export class OvsdbClient {
)
if (selectResult === undefined) {
log.error('No bridge found for network', {
network: network.name_label,
network: networkName,
host: this.host.name_label,
})
return {}
return []
}
return { uuid: selectResult._uuid[1], name: selectResult.name }
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
const ports = await this._getBridgePorts(bridge, socket)
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
return false
}
@@ -357,7 +329,7 @@ export class OvsdbClient {
remoteAddress,
socket
)
if (hasRemote) {
if (hasRemote === true) {
return true
}
}
@@ -366,8 +338,8 @@ export class OvsdbClient {
return false
}
async _getBridgePorts(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult === undefined) {
return
@@ -465,7 +437,9 @@ export class OvsdbClient {
async _sendOvsdbTransaction(params, socket) {
const stream = socket
const requestId = ++this._requestId
const requestId = this._requestID
++this._requestID
const req = {
id: requestId,
method: 'transact',

View File

@@ -1,202 +0,0 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:private-network')
// =============================================================================
const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
Array.from({ length: 16 }, _ => sample(CHARS)).join('')
// =============================================================================
export class PrivateNetwork {
constructor(controller, uuid) {
this.controller = controller
this.uuid = uuid
this.networks = {}
}
// ---------------------------------------------------------------------------
async addHost(host) {
if (host.$ref === this.center?.$ref) {
// Nothing to do
return
}
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
log.error('No OVSDB client found', {
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
const centerClient = this.controller.ovsdbClients[this.center.$ref]
if (centerClient === undefined) {
log.error('No OVSDB client found for star-center', {
privateNetwork: this.uuid,
host: this.center.name_label,
pool: this.center.$pool.name_label,
})
return
}
const network = this.networks[host.$pool.uuid]
const centerNetwork = this.networks[this.center.$pool.uuid]
const otherConfig = network.other_config
const encapsulation =
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
const password =
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
])
} catch (error) {
log.error('Error while connecting host to private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
log.info('Host added', {
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return bridgeName
}
addNetwork(network) {
this.networks[network.$pool.uuid] = network
log.info('Adding network', {
privateNetwork: this.uuid,
network: network.name_label,
pool: network.$pool.name_label,
})
if (this.center === undefined) {
return this.electNewCenter()
}
const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
return Promise.all(
map(hosts, async host => {
const hostClient = this.controller.ovsdbClients[host.$ref]
const network = this.networks[host.$pool.uuid]
await hostClient.resetForNetwork(network, this.uuid)
await this.addHost(host)
})
)
}
async electNewCenter() {
delete this.center
// TODO: make it random
const hosts = this._getHosts()
for (const host of hosts) {
const pif = find(host.$PIFs, {
network: this.networks[host.$pool.uuid].$ref,
})
if (pif?.currently_attached && host.$metrics.live) {
this.center = host
break
}
}
if (this.center === undefined) {
log.error('No available host to elect new star-center', {
privateNetwork: this.uuid,
})
return
}
await this._reset()
// Recreate star topology
await Promise.all(map(hosts, host => this.addHost(host)))
log.info('New star-center elected', {
center: this.center.name_label,
privateNetwork: this.uuid,
})
}
// ---------------------------------------------------------------------------
getPools() {
const pools = []
forOwn(this.networks, network => {
pools.push(network.$pool)
})
return pools
}
// ---------------------------------------------------------------------------
_reset() {
return Promise.all(
map(this._getHosts(), async host => {
// Clean old ports and interfaces
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
return
}
const network = this.networks[host.$pool.uuid]
try {
await hostClient.resetForNetwork(network, this.uuid)
} catch (error) {
log.error('Error while resetting private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: network.$pool.name_label,
})
}
})
)
}
// ---------------------------------------------------------------------------
_getHosts() {
const hosts = []
forOwn(this.networks, network => {
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
})
return hosts
}
}

View File

@@ -15,20 +15,15 @@ src
| | └─ index.spec.js.snap
| └─ index.spec.js
├─ job
| └─ index.spec.js
├─ issues
¦ └─ index.spec.js
¦
¦
├─ _xoConnection.js
└─ util.js
```
The tests can describe:
- XO methods or scenarios:
`src/user/index.js`
```js
The tests can describe xo methods or scenarios:
```javascript
import xo from "../_xoConnection";
describe("user", () => {
@@ -51,16 +46,6 @@ describe("user", () => {
});
});
```
- issues
`src/issues/index.js`
```js
describe("issue", () => {
test("5454", () => {
/* some tests */
})
})
```
### Best practices
@@ -141,8 +126,7 @@ describe("user", () => {
After each run of the tests, check that snapshots are not inadvertently modified.
- ⚠ Jest known issue ⚠
If a test timeout is triggered, the next async tests can fail; this is due to inadvertently modified snapshots.
As a workaround, you can clean your git working tree and re-run jest using a large timeout: `> yarn test --testTimeout=100000`
If a test timeout is triggered, the next async tests can fail; it is due to inadvertently modified snapshots.
## Contributions

View File

@@ -36,7 +36,7 @@
"golike-defer": "^0.4.1",
"jest": "^24.8.0",
"lodash": "^4.17.11",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-lib": "^0.9.0"

View File

@@ -3,9 +3,6 @@
email = ''
password = ''
[pools]
default = ''
[servers]
[servers.default]
username = ''
@@ -14,7 +11,6 @@
[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''
[templates]
@@ -23,6 +19,8 @@
[srs]
default = ''
localSr = ''
sharedSr = ''
[remotes]
default = { name = '', url = '' }

View File

@@ -1,6 +0,0 @@
export const getDefaultName = () => `xo-server-test ${new Date().toISOString()}`
export const getDefaultSchedule = () => ({
name: getDefaultName(),
cron: '0 * * * * *',
})

View File

@@ -2,11 +2,15 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { find, forOwn } from 'lodash'
import { fromEvent } from 'promise-toolbox'
import config from './_config'
import { getDefaultName } from './_defaultValues'
const getDefaultCredentials = () => {
const { email, password } = config.xoConnection
return { email, password }
}
class XoConnection extends Xo {
constructor(opts) {
@@ -68,10 +72,7 @@ class XoConnection extends Xo {
}
@defer
async connect(
$defer,
credentials = pick(config.xoConnection, 'email', 'password')
) {
async connect($defer, credentials = getDefaultCredentials()) {
await this.open()
$defer.onFailure(() => this.close())
@@ -86,7 +87,7 @@ class XoConnection extends Xo {
while (true) {
try {
await predicate(obj)
return obj
return
} catch (_) {}
// If failed, wait for next object state/update and retry.
obj = await this.waitObject(id)
@@ -110,61 +111,18 @@ class XoConnection extends Xo {
}
async createTempBackupNgJob(params) {
// mutate and inject default values
defaultsDeep(params, {
mode: 'full',
name: getDefaultName(),
settings: {
'': {
// it must be enabled because the XAPI might be not able to coalesce VDIs
// as fast as the tests run
//
// see https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
bypassVdiChainsCheck: true,
// it must be 'never' to avoid race conditions with the plugin `backup-reports`
reportWhen: 'never',
},
},
})
const id = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id })
return this.call('backupNg.getJob', { id })
}
async createTempNetwork(params) {
const id = await this.call('network.create', {
name: 'XO Test',
pool: config.pools.default,
...params,
})
this._tempResourceDisposers.push('network.delete', { id })
return this.getOrWaitObject(id)
const job = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
return job
}
async createTempVm(params) {
const id = await this.call('vm.create', {
name_label: getDefaultName(),
template: config.templates.templateWithoutDisks,
...params,
})
const id = await this.call('vm.create', params)
this._tempResourceDisposers.push('vm.delete', { id })
return this.waitObjectState(id, vm => {
await this.waitObjectState(id, vm => {
if (vm.type !== 'VM') throw new Error('retry')
})
}
async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })
return this.waitObjectState(id, vm => {
if (
vm.power_state !== 'Running' ||
(withXenTools && vm.xenTools === false)
) {
throw new Error('retry')
}
})
return id
}
async createTempRemote(params) {
@@ -223,24 +181,26 @@ class XoConnection extends Xo {
)
})
})
}
forOwn(this.objects.all, (obj, id) => {
if (
obj.other !== undefined &&
obj.other['xo:backup:job'] === jobId &&
obj.other['xo:backup:schedule'] === scheduleId
) {
this._tempResourceDisposers.push('vm.delete', {
id,
})
for (const id in this.objects.all) {
if (this.objects.all[id].other) {
const { 'xo:backup:schedule': snapshotSchedule } = this.objects.all[
id
].other
if (snapshotSchedule === scheduleId) {
this._tempResourceDisposers.push('vm.delete', {
id,
})
}
}
}
})
}
return backups
}
getBackupLogs(filter) {
return this.call('backupNg.getLogs', { _forceRefresh: true, ...filter })
async importVmBackup(params) {
const id = await xo.call('backupNg.importVmBackup', params)
this._tempResourceDisposers.push('vm.delete', { id })
return id
}
async _cleanDisposers(disposers) {

View File

@@ -1,6 +1,61 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Any<Object>,
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;
exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
Object {
"cron": "0 * * * * *",
"enabled": false,
"id": Any<String>,
"jobId": Any<String>,
"name": "scheduleTest",
}
`;
exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Object {
"": Object {
"reportWhen": "never",
},
},
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "skipped",
}
`;
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -37,6 +92,23 @@ Array [
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "failure",
}
`;
exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -55,83 +127,37 @@ Object {
}
`;
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -142,7 +168,7 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -157,19 +183,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
Object {
"end": Any<Number>,
@@ -184,6 +197,19 @@ Object {
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -198,19 +224,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
Object {
"end": Any<Number>,
@@ -226,13 +239,12 @@ Object {
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
@@ -240,10 +252,15 @@ Object {
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"message": "snapshot",
"result": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
@@ -253,8 +270,7 @@ exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retent
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
@@ -268,29 +284,14 @@ exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retent
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"message": "snapshot",
"result": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -305,6 +306,19 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
Object {
"end": Any<Number>,
@@ -319,6 +333,21 @@ Object {
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -331,7 +360,36 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -345,7 +403,7 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -356,47 +414,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
Object {
"data": Object {
@@ -438,7 +455,65 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 25`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 26`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 27`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -449,7 +524,7 @@ Object {
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
Object {
"data": Object {
"id": Any<String>,

View File

@@ -6,47 +6,23 @@ import { noSuchObject } from 'xo-common/api-errors'
import config from '../_config'
import randomId from '../_randomId'
import xo from '../_xoConnection'
import { getDefaultName, getDefaultSchedule } from '../_defaultValues'
const validateBackupJob = (jobInput, jobOutput, createdSchedule) => {
const expectedObj = {
id: expect.any(String),
mode: jobInput.mode,
name: jobInput.name,
type: 'backup',
settings: {
'': jobInput.settings[''],
},
userId: xo._user.id,
vms: jobInput.vms,
}
const schedules = jobInput.schedules
if (schedules !== undefined) {
const scheduleTmpId = Object.keys(schedules)[0]
expect(createdSchedule).toEqual({
...schedules[scheduleTmpId],
enabled: false,
id: expect.any(String),
jobId: jobOutput.id,
})
expectedObj.settings[createdSchedule.id] = jobInput.settings[scheduleTmpId]
}
expect(jobOutput).toEqual(expectedObj)
const DEFAULT_SCHEDULE = {
name: 'scheduleTest',
cron: '0 * * * * *',
}
const validateRootTask = (log, expected) =>
expect(log).toEqual({
const validateRootTask = (log, props) =>
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
message: 'backup',
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
...expected,
...props,
})
const validateVmTask = (task, vmId, props) => {
const validateVmTask = (task, vmId, props = {}) => {
expect(task).toMatchSnapshot({
data: {
id: expect.any(String),
@@ -91,54 +67,83 @@ const validateOperationTask = (task, props) => {
}
describe('backupNg', () => {
let defaultBackupNg
beforeAll(() => {
defaultBackupNg = {
name: 'default-backupNg',
mode: 'full',
vms: {
id: config.vms.default,
},
settings: {
'': {
reportWhen: 'never',
},
},
}
})
describe('.createJob() :', () => {
it('creates a new backup job without schedules', async () => {
const jobInput = {
mode: 'full',
vms: {
id: config.vms.default,
},
}
const jobOutput = await xo.createTempBackupNgJob(jobInput)
validateBackupJob(jobInput, jobOutput)
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
expect(backupNg).toMatchSnapshot({
id: expect.any(String),
userId: expect.any(String),
vms: expect.any(Object),
})
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
expect(backupNg.userId).toBe(xo._user.id)
})
it('creates a new backup job with schedules', async () => {
const scheduleTempId = randomId()
const jobInput = {
mode: 'full',
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
vms: {
id: config.vms.default,
},
}
const jobOutput = await xo.createTempBackupNgJob(jobInput)
validateBackupJob(
jobInput,
jobOutput,
await xo.getSchedule({ jobId: jobOutput.id })
)
})
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
expect(backupNgJob).toMatchSnapshot({
id: expect.any(String),
userId: expect.any(String),
settings: expect.any(Object),
vms: expect.any(Object),
})
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
expect(backupNgJob.userId).toBe(xo._user.id)
expect(Object.keys(backupNgJob.settings).length).toBe(2)
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
expect(backupNgJob.settings[schedule.id]).toEqual({
snapshotRetention: 1,
})
expect(schedule).toMatchSnapshot({
id: expect.any(String),
jobId: expect.any(String),
})
})
})
describe('.delete() :', () => {
it('deletes a backup job', async () => {
const scheduleTempId = randomId()
const jobId = await xo.call('backupNg.createJob', {
mode: 'full',
name: getDefaultName(),
vms: {
id: config.vms.default,
},
const { id: jobId } = await xo.call('backupNg.createJob', {
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
})
@@ -164,19 +169,16 @@ describe('backupNg', () => {
describe('.runJob() :', () => {
it('fails trying to run a backup job without schedule', async () => {
const { id } = await xo.createTempBackupNgJob({
vms: {
id: config.vms.default,
},
})
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
})
it('fails trying to run a backup job with no matching VMs', async () => {
const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
[scheduleTempId]: { snapshotRetention: 1 },
@@ -199,8 +201,9 @@ describe('backupNg', () => {
jest.setTimeout(7e3)
const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
[scheduleTempId]: { snapshotRetention: 1 },
@@ -214,7 +217,7 @@ describe('backupNg', () => {
expect(typeof schedule).toBe('object')
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
const [log] = await xo.getBackupLogs({
const [log] = await xo.call('backupNg.getLogs', {
scheduleId: schedule.id,
})
expect(log.warnings).toMatchSnapshot()
@@ -223,24 +226,26 @@ describe('backupNg', () => {
it('fails trying to run a backup job with a VM without disks', async () => {
jest.setTimeout(8e3)
await xo.createTempServer(config.servers.default)
const { id: vmIdWithoutDisks } = await xo.createTempVm({
const vmIdWithoutDisks = await xo.createTempVm({
name_label: 'XO Test Without Disks',
name_description: 'Creating a vm without disks',
template: config.templates.templateWithoutDisks,
})
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
vms: {
id: vmIdWithoutDisks,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -251,20 +256,16 @@ describe('backupNg', () => {
tasks: [vmTask],
...log
},
] = await xo.getBackupLogs({
] = await xo.call('backupNg.getLogs', {
jobId,
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'skipped',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
expect(vmTask).toMatchSnapshot({
@@ -288,24 +289,22 @@ describe('backupNg', () => {
const scheduleTempId = randomId()
await xo.createTempServer(config.servers.default)
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: {},
},
srs: {
id: config.srs.default,
},
vms: {
id: config.vms.default,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -316,20 +315,17 @@ describe('backupNg', () => {
tasks: [task],
...log
},
] = await xo.getBackupLogs({
] = await xo.call('backupNg.getLogs', {
jobId,
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'failure',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
expect(task).toMatchSnapshot({
@@ -351,7 +347,8 @@ describe('backupNg', () => {
test('execute three times a rolling snapshot with 2 as retention & revert to an old state', async () => {
jest.setTimeout(6e4)
await xo.createTempServer(config.servers.default)
let vm = await xo.createTempVm({
const vmId = await xo.createTempVm({
name_label: 'XO Test Temp',
name_description: 'Creating a temporary vm',
template: config.templates.default,
VDIs: [
@@ -364,45 +361,48 @@ describe('backupNg', () => {
})
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
vms: {
id: vm.id,
id: vmId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 2 },
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
for (let i = 0; i < 3; i++) {
const oldSnapshots = xo.objects.all[vmId].snapshots
await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })
vm = await xo.waitObjectState(vm.id, ({ snapshots }) => {
await xo.waitObjectState(vmId, ({ snapshots }) => {
// Check that the snapshots have been updated.
expect(snapshots).not.toEqual(vm.snapshots)
expect(snapshots).not.toEqual(oldSnapshots)
})
}
const { snapshots, videoram: oldVideoram } = xo.objects.all[vmId]
// Check the retention: how many snapshots should be kept.
expect(vm.snapshots.length).toBe(2)
expect(snapshots.length).toBe(2)
const newVideoram = 16
await xo.call('vm.set', { id: vm.id, videoram: newVideoram })
await xo.waitObjectState(vm.id, ({ videoram }) => {
await xo.call('vm.set', { id: vmId, videoram: newVideoram })
await xo.waitObjectState(vmId, ({ videoram }) => {
expect(videoram).toBe(newVideoram.toString())
})
await xo.call('vm.revert', {
snapshot: vm.snapshots[0],
snapshot: snapshots[0],
})
await xo.waitObjectState(vm.id, ({ videoram }) => {
expect(videoram).toBe(vm.videoram)
await xo.waitObjectState(vmId, ({ videoram }) => {
expect(videoram).toBe(oldVideoram)
})
const [
@@ -410,20 +410,17 @@ describe('backupNg', () => {
tasks: [{ tasks: subTasks, ...vmTask }],
...log
},
] = await xo.getBackupLogs({
] = await xo.call('backupNg.getLogs', {
jobId,
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'success',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
const subTaskSnapshot = subTasks.find(
@@ -445,20 +442,23 @@ describe('backupNg', () => {
message: expect.any(String),
start: expect.any(Number),
})
expect(vmTask.data.id).toBe(vm.id)
expect(vmTask.data.id).toBe(vmId)
})
test('execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval', async () => {
jest.setTimeout(12e5)
jest.setTimeout(6e4)
const {
vms: { default: defaultVm, vmToBackup = defaultVm },
remotes: { default: defaultRemote, remote1, remote2 = defaultRemote },
srs: { localSr, sharedSr },
servers: { default: defaultServer },
} = config
expect(vmToBackup).not.toBe(undefined)
expect(remote1).not.toBe(undefined)
expect(remote2).not.toBe(undefined)
expect(localSr).not.toBe(undefined)
expect(sharedSr).not.toBe(undefined)
await xo.createTempServer(defaultServer)
const { id: remoteId1 } = await xo.createTempRemote(remote1)
@@ -468,7 +468,7 @@ describe('backupNg', () => {
const exportRetention = 2
const fullInterval = 2
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
mode: 'delta',
remotes: {
id: {
@@ -476,10 +476,11 @@ describe('backupNg', () => {
},
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
'': {
reportWhen: 'never',
fullInterval,
},
[remoteId1]: { deleteFirst: true },
@@ -488,8 +489,7 @@ describe('backupNg', () => {
vms: {
id: vmToBackup,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -503,21 +503,19 @@ describe('backupNg', () => {
expect(backups.length).toBe(exportRetention)
)
const backupLogs = await xo.getBackupLogs({
const backupLogs = await xo.call('backupNg.getLogs', {
jobId,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(nExecutions)
backupLogs.forEach(({ tasks = [], ...log }, key) => {
backupLogs.forEach(({ tasks, ...log }, key) => {
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
mode: 'delta',
reportWhen: 'never',
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
message: 'backup',
status: 'success',
})
@@ -528,11 +526,11 @@ describe('backupNg', () => {
transfer: 0,
vm: 0,
}
tasks.forEach(({ tasks = [], ...vmTask }) => {
tasks.forEach(({ tasks, ...vmTask }) => {
if (vmTask.data !== undefined && vmTask.data.type === 'VM') {
validateVmTask(vmTask, vmToBackup, { status: 'success' })
numberOfTasks.vm++
tasks.forEach(({ tasks = [], ...subTask }) => {
tasks.forEach(({ tasks, ...subTask }) => {
if (subTask.message === 'snapshot') {
validateSnapshotTask(subTask, { status: 'success' })
numberOfTasks.snapshot++
@@ -583,111 +581,20 @@ describe('backupNg', () => {
vm: 1,
})
})
const vmBackupOnLocalSr = await xo.importVmBackup({
id: backupsByRemote[remoteId1][0],
sr: localSr,
})
const vmBackupOnSharedSr = await xo.importVmBackup({
id: backupsByRemote[remoteId2][0],
sr: sharedSr,
})
expect(xo.objects.all[vmBackupOnLocalSr]).not.toBe(undefined)
expect(xo.objects.all[vmBackupOnSharedSr]).not.toBe(undefined)
await xo.call('vm.start', { id: vmBackupOnLocalSr })
await xo.call('vm.start', { id: vmBackupOnSharedSr })
})
test('create and execute backup with enabled offline backup', async () => {
const vm = xo.objects.all[config.vms.withOsAndXenTools]
if (vm.power_state !== 'Running') {
await xo.startTempVm(vm.id, { force: true }, true)
}
const scheduleTempId = randomId()
const srId = config.srs.default
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const backupInput = {
mode: 'full',
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
offlineBackup: true,
},
[scheduleTempId]: {
copyRetention: 1,
exportRetention: 1,
},
},
srs: {
id: srId,
},
vms: {
id: vm.id,
},
}
const backup = await xo.createTempBackupNgJob(backupInput)
expect(backup.settings[''].offlineBackup).toBe(true)
const schedule = await xo.getSchedule({ jobId: backup.id })
await Promise.all([
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Halted') {
throw new Error('retry')
}
}),
])
await xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Running') {
throw new Error('retry')
}
})
const backupLogs = await xo.getBackupLogs({
jobId: backup.id,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(1)
const { tasks, ...log } = backupLogs[0]
validateRootTask(log, {
data: {
mode: backupInput.mode,
reportWhen: backupInput.settings[''].reportWhen,
},
jobId: backup.id,
jobName: backupInput.name,
scheduleId: schedule.id,
status: 'success',
})
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...vmTask }) => {
validateVmTask(vmTask, vm.id, { status: 'success' })
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...subTask }) => {
expect(subTask.message).not.toBe('snapshot')
if (subTask.message === 'export') {
validateExportTask(
subTask,
subTask.data.type === 'remote' ? remoteId : srId,
{
data: expect.any(Object),
status: 'success',
}
)
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(operationTask => {
if (
operationTask.message === 'transfer' ||
operationTask.message === 'merge'
) {
validateOperationTask(operationTask, {
result: { size: expect.any(Number) },
status: 'success',
})
}
})
}
})
})
}, 200e3)
})
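
The spec above keeps repeating one create → run → inspect flow, and the `Any<Number>` / `Any<String>` entries in the snapshot diff are simply Jest asymmetric matchers serialized when `toMatchSnapshot` receives property matchers. A condensed sketch of that flow, limited to calls that appear in this diff (`xo`, `config` and `randomId` come from the test harness; the concrete values are illustrative only):

/* eslint-env jest */
import config from '../_config'
import randomId from '../_randomId'
import xo from '../_xoConnection'

test('create, run and inspect a backup job', async () => {
  const scheduleTempId = randomId()
  const { id: jobId } = await xo.createTempBackupNgJob({
    mode: 'full',
    vms: { id: config.vms.default },
    schedules: { [scheduleTempId]: { name: 'scheduleTest', cron: '0 * * * * *' } },
    settings: { '': { reportWhen: 'never' }, [scheduleTempId]: { snapshotRetention: 1 } },
  })
  const schedule = await xo.getSchedule({ jobId })
  await xo.call('backupNg.runJob', { id: jobId, schedule: schedule.id })

  const [{ tasks, ...log }] = await xo.call('backupNg.getLogs', {
    jobId,
    scheduleId: schedule.id,
  })
  expect(Array.isArray(tasks)).toBe(true)
  expect(log).toMatchSnapshot({
    end: expect.any(Number), // serialized as Any<Number> in the .snap file
    id: expect.any(String), // serialized as Any<String>
    jobId: expect.any(String),
    scheduleId: expect.any(String),
    start: expect.any(Number),
  })
})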

View File

@@ -1,51 +0,0 @@
/* eslint-env jest */
import config from '../_config'
import xo from '../_xoConnection'
describe('issue', () => {
test('4507', async () => {
await xo.createTempServer(config.servers.default)
const props = {
coresPerSocket: 1,
cpuCap: 1,
}
const vm = await xo.createTempVm(props)
expect(vm).toMatchObject(props)
await xo.call('vm.set', {
coresPerSocket: null,
cpuCap: null,
id: vm.id,
})
await xo.waitObjectState(vm.id, vm => {
expect(vm.coresPerSocket).toBe(undefined)
expect(vm.cpuCap).toBe(undefined)
})
})
test('4514', async () => {
await xo.createTempServer(config.servers.default)
const oldName = 'Old XO Test name'
const { id, name_label } = await xo.createTempNetwork({ name: oldName })
expect(name_label).toBe(oldName)
const newName = 'New XO Test name'
await xo.call('network.set', { id, name_label: newName })
await xo.waitObjectState(id, ({ name_label }) => {
expect(name_label).toBe(newName)
})
})
test('4523', async () => {
const id = await xo.call('network.create', {
name: 'XO Test',
pool: config.pools.default,
})
expect(typeof id).toBe('string')
await xo.call('network.delete', { id })
})
})

View File

@@ -6,7 +6,7 @@ import expect from 'must'
// ===================================================================
import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map } from 'lodash'
import { map, assign } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -27,7 +27,7 @@ describe('disk', () => {
const config = await getConfig()
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -1,6 +1,6 @@
/* eslint-env jest */
import { find, map } from 'lodash'
import { assign, find, map } from 'lodash'
import { config, rejectionOf, xo } from './util'
@@ -151,7 +151,7 @@ describe('server', () => {
it('connects to a Xen server', async () => {
const serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
@@ -184,7 +184,7 @@ describe('server', () => {
let serverId
beforeEach(async () => {
serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
id: serverId,

View File

@@ -12,7 +12,7 @@ import {
getOneHost,
waitObjectState,
} from './util'
import { map } from 'lodash'
import { assign, map } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -33,7 +33,7 @@ describe('vbd', () => {
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -34,15 +34,15 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,3 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,32 +0,0 @@
{
"name": "xo-server-transport-icinga2",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-transport-icinga2",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-transport-icinga2",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "./dist",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.1.0",
"engines": {
"node": ">=8.9.4"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/preset-env": "^7.4.4",
"cross-env": "^6.0.3"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0"
},
"private": true
}

View File

@@ -1,136 +0,0 @@
import assert from 'assert'
import { URL } from 'url'
// =============================================================================
export const configurationSchema = {
type: 'object',
properties: {
server: {
type: 'string',
description: `
The icinga2 server http/https address.
*If no port is provided in the URL, 5665 will be used.*
Examples:
- https://icinga2.example.com
- http://192.168.0.1:1234
`.trim(),
},
user: {
type: 'string',
description: 'The icinga2 server username',
},
password: {
type: 'string',
description: 'The icinga2 server password',
},
filter: {
type: 'string',
description: `
The filter to use
See: https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/#filters
Example:
- Monitor the backup jobs of the VMs of a specific host:
\`host.name=="xoa.example.com" && service.name=="xo-backup"\`
`.trim(),
},
acceptUnauthorized: {
type: 'boolean',
description: 'Accept unauthorized certificates',
default: false,
},
},
additionalProperties: false,
required: ['server'],
}
// =============================================================================
const STATUS_MAP = {
OK: 0,
WARNING: 1,
CRITICAL: 2,
UNKNOWN: 3,
}
// =============================================================================
class XoServerIcinga2 {
constructor({ xo }) {
this._xo = xo
}
// ---------------------------------------------------------------------------
configure(configuration) {
const serverUrl = new URL(configuration.server)
if (configuration.user !== '') {
serverUrl.username = configuration.user
}
if (configuration.password !== '') {
serverUrl.password = configuration.password
}
if (serverUrl.port === '') {
serverUrl.port = '5665' // Default icinga2 access port
}
serverUrl.pathname = '/v1/actions/process-check-result'
this._url = serverUrl.href
this._filter =
configuration.filter !== undefined ? configuration.filter : ''
this._acceptUnauthorized = configuration.acceptUnauthorized
}
load() {
this._unset = this._xo.defineProperty(
'sendIcinga2Status',
this._sendIcinga2Status,
this
)
}
unload() {
this._unset()
}
test() {
return this._sendIcinga2Status({
message:
'The server-icinga2 plugin for Xen Orchestra server seems to be working fine, nicely done :)',
status: 'OK',
})
}
// ---------------------------------------------------------------------------
_sendIcinga2Status({ message, status }) {
const icinga2Status = STATUS_MAP[status]
assert(icinga2Status !== undefined, `Invalid icinga2 status: ${status}`)
return this._xo
.httpRequest(this._url, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
},
rejectUnauthorized: !this._acceptUnauthorized,
body: JSON.stringify({
type: 'Service',
filter: this._filter,
plugin_output: message,
exit_status: icinga2Status,
}),
})
.readAll()
}
}
// =============================================================================
export default opts => new XoServerIcinga2(opts)
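
The plugin above is configured through the fields of `configurationSchema` and reports by POSTing a check result to `/v1/actions/process-check-result`. A sketch of what that looks like in practice, with example values only (none taken from a real deployment):

// A configuration object the schema above accepts.
const exampleConfiguration = {
  server: 'https://icinga2.example.com', // no port given, so 5665 is used
  user: 'root',
  password: 'secret',
  filter: 'host.name=="xoa.example.com" && service.name=="xo-backup"',
  acceptUnauthorized: false,
}

// The body _sendIcinga2Status({ message, status: 'OK' }) would send.
const exampleCheckResult = {
  type: 'Service',
  filter: exampleConfiguration.filter,
  plugin_output: 'backup finished successfully', // the `message` argument
  exit_status: 0, // STATUS_MAP.OK
}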

View File

@@ -39,8 +39,8 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -33,15 +33,15 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"slack-node": "^0.1.8"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.5.4"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -40,8 +40,8 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.7.3",
"version": "0.7.2",
"license": "AGPL-3.0",
"description": "",
"keywords": [
@@ -36,21 +36,21 @@
},
"dependencies": {
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.5",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/log": "^0.1.4",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"rimraf": "^3.0.0"
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -5,6 +5,7 @@ import humanFormat from 'human-format'
import { createSchedule } from '@xen-orchestra/cron'
import { minify } from 'html-minifier'
import {
assign,
concat,
differenceBy,
filter,
@@ -417,7 +418,7 @@ function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
}))
)
return Object.assign(
return assign(
computeMeans(vmsStats, [
'cpu',
'ram',
@@ -445,7 +446,7 @@ function computeGlobalHostsStats({ haltedHosts, hostsStats, xo }) {
}))
)
return Object.assign(
return assign(
computeMeans(hostsStats, [
'cpu',
'ram',

View File

@@ -17,11 +17,9 @@ createUserOnFirstSignin = true
# their size just by looking at the beginning of the stream.
#
# But it is a guess, not a certainty, it depends on how the VHDs are formatted
# by XenServer.
#
# This has been tested for 5 months, therefore it's enabled by default but can
# be disabled specifically for a user if necessary.
guessVhdSizeOnImport = true
# by XenServer, therefore it's disabled for the moment but can be enabled
# specifically for a user if necessary.
guessVhdSizeOnImport = false
# Whether API logs should contain the full request/response on
# errors.

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.50.1",
"version": "5.46.0",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -30,23 +30,22 @@
"bin": "bin"
},
"engines": {
"node": ">=8"
"node": ">=6"
},
"dependencies": {
"@iarna/toml": "^2.2.1",
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.5",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.10.1",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/log": "^0.1.4",
"@xen-orchestra/mixin": "^0.0.0",
"ajv": "^6.1.1",
"app-conf": "^0.7.0",
"archiver": "^3.0.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"bluebird": "^3.5.1",
"body-parser": "^1.18.2",
@@ -58,15 +57,16 @@
"debug": "^4.0.1",
"decorator-synchronized": "^0.5.0",
"deptree": "^1.0.0",
"escape-string-regexp": "^1.0.5",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"execa": "^2.0.5",
"execa": "^1.0.0",
"express": "^4.16.2",
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.7.1",
"helmet": "^3.9.0",
@@ -90,7 +90,7 @@
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"make-error": "^1",
"micromatch": "^4.0.2",
"micromatch": "^3.1.4",
"minimist": "^1.2.0",
"moment-timezone": "^0.5.14",
"ms": "^2.1.1",
@@ -102,7 +102,7 @@
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^24.0.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pump": "^3.0.0",
@@ -122,8 +122,8 @@
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.7.0",
"ws": "^7.1.2",
"xen-api": "^0.27.2",
"ws": "^6.0.0",
"xen-api": "^0.27.1",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
@@ -147,9 +147,9 @@
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

View File

@@ -1,34 +0,0 @@
/* eslint-env jest */
import MultiKeyMap from './_MultiKeyMap'
describe('MultiKeyMap', () => {
it('works', () => {
const map = new MultiKeyMap()
const keys = [
// null key
[],
// simple key
['foo'],
// composite key
['foo', 'bar'],
// reverse composite key
['bar', 'foo'],
]
const values = keys.map(() => ({}))
// set all values first to make sure they are all stored and not only the
// last one
keys.forEach((key, i) => {
map.set(key, values[i])
})
keys.forEach((key, i) => {
// copy the key to make sure the array itself is not the key
expect(map.get(key.slice())).toBe(values[i])
map.delete(key.slice())
expect(map.get(key.slice())).toBe(undefined)
})
})
})

View File

@@ -16,28 +16,13 @@ function scheduleRemoveCacheEntry(keys, expires) {
const defaultKeyFn = () => []
const { slice } = Array.prototype
export const REMOVE_CACHE_ENTRY = {}
// debounce an async function so that all subsequent calls in a delay receive
// the same result
//
// similar to `p-debounce` with `leading` set to `true` but with key support
//
// - `delay`: number of milliseconds to cache the response, a function can be
// passed to use a custom delay for a call based on its parameters
export const debounceWithKey = (fn, delay, keyFn = defaultKeyFn) => {
export default (fn, delay, keyFn = defaultKeyFn) => {
const cache = new MultiKeyMap()
const delayFn = typeof delay === 'number' ? () => delay : delay
return function(arg) {
if (arg === REMOVE_CACHE_ENTRY) {
return removeCacheEntry(
cache,
ensureArray(keyFn.apply(this, slice.call(arguments, 1)))
)
}
return function() {
const keys = ensureArray(keyFn.apply(this, arguments))
let promise = cache.get(keys)
if (promise === undefined) {
@@ -45,15 +30,10 @@ export const debounceWithKey = (fn, delay, keyFn = defaultKeyFn) => {
const remove = scheduleRemoveCacheEntry.bind(
cache,
keys,
Date.now() + delayFn.apply(this, arguments)
Date.now() + delay
)
promise.then(remove, remove)
}
return promise
}
}
debounceWithKey.decorate = (...params) => (target, name, descriptor) => ({
...descriptor,
value: debounceWithKey(descriptor.value, ...params),
})
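
Only one side of the hunk above documents this helper, so a usage sketch of the documented variant may help: within `delay` milliseconds, calls that map to the same key share a single cached promise, and passing `REMOVE_CACHE_ENTRY` evicts a key early. The module path below is assumed; everything else mirrors the code shown.

import { debounceWithKey, REMOVE_CACHE_ENTRY } from './_pDebounceWithKey' // path assumed

// cache each result for one minute, keyed on the first argument
const getStats = debounceWithKey(
  async id => ({ id, fetchedAt: Date.now() }),
  60e3,
  id => [id]
)

const p1 = getStats('vm-1')
const p2 = getStats('vm-1')
// p1 === p2 while the cache entry is alive
getStats(REMOVE_CACHE_ENTRY, 'vm-1') // drops the entry for 'vm-1' immediately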

Some files were not shown because too many files have changed in this diff.