Compare commits

..

4 Commits

Author SHA1 Message Date
Mohamedox
a4b966aa96 adapt PR to comments 2019-10-02 17:05:47 +02:00
Mohamedox
7459c9e2cf show description only when it exists 2019-10-02 16:16:58 +02:00
Mohamedox
4edaf67f0a update changelog 2019-10-02 16:15:47 +02:00
Mohamedox
876c1130e1 feat(xo-web/hub): display template description on Hub 2019-10-02 16:10:21 +02:00
171 changed files with 5575 additions and 6029 deletions

View File

@@ -21,7 +21,7 @@ module.exports = {
overrides: [
{
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
rules: {
'no-console': 'off',
},
@@ -40,13 +40,6 @@ module.exports = {
'react/jsx-handler-names': 'off',
// disabled because not always relevant, we might reconsider in the future
//
// enabled by https://github.com/standard/eslint-config-standard/commit/319b177750899d4525eb1210686f6aca96190b2f
//
// example: https://github.com/vatesfr/xen-orchestra/blob/31ed3767c67044ca445658eb6b560718972402f2/packages/xen-api/src/index.js#L156-L157
'lines-between-class-members': 'off',
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

View File

@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,378 +0,0 @@
#!/usr/bin/env node

// CLI entry point for `xo-backups`.
//
// The only supported command is `clean-vms`; anything else (or `-h`/`--help`)
// prints the usage and exits with a non-zero status.
const args = process.argv.slice(2)
if (
  args.length === 0 ||
  /^(?:-h|--help)$/.test(args[0]) ||
  args[0] !== 'clean-vms'
) {
  console.log('Usage: xo-backups clean-vms [--force] xo-vm-backups/*')
  // NOTE: a top-level `return` works here because Node wraps CommonJS files
  // in a function — presumably this file is executed as CommonJS; confirm.
  // eslint-disable-next-line no-process-exit
  return process.exit(1)
}

// remove `clean-vms` arg which is the only available command ATM
args.splice(0, 1)

// only act (ie delete files) if `--force` is present
// (without it the script runs as a dry run that only logs what it would do)
const force = args[0] === '--force'
if (force) {
  args.splice(0, 1)
}
// -----------------------------------------------------------------------------
const assert = require('assert')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')
const fs = promisifyAll(require('fs'))
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
// -----------------------------------------------------------------------------
// Map `fn` over an array or other iterable and wait for all resulting
// promises to settle (rejects on the first failure, like `Promise.all`).
//
// Curried on the right via lodash's `curryRight`, so it can be used either
// directly as `asyncMap(iterable, fn)` or partially applied as
// `asyncMap(fn)` to get an `iterable => promise` function (handy in `pipe`).
const asyncMap = curryRight(function(iterable, fn) {
  const mapped = Array.isArray(iterable)
    ? iterable.map(fn)
    : Array.from(iterable, fn)
  return Promise.all(mapped)
})
// Point-free helper: build a function that applies `Array#filter` with the
// given arguments to whatever array it receives (used in `pipe` chains).
const filter = (...filterArgs) => {
  return array => array.filter(...filterArgs)
}
// TODO: better check?
//
// our heuristic is not good enough, there have been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// either:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// FIXME: the heuristic does not work if the XVA is compressed, we need to
// implement a specific test for it
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
//
// Heuristic validity check for an (uncompressed) tar archive at `path`:
// size must be larger than the two trailing 512 B blocks and a multiple of
// 512, and the last 1024 B must be all zeros (tar end-of-archive marker).
// Returns `false` only when confidently invalid; on any unexpected error it
// logs and reports the file as valid to avoid destructive false positives.
const isValidTar = async path => {
  try {
    const fd = await fs.open(path, 'r')
    try {
      const { size } = await fs.fstat(fd)
      if (size <= 1024 || size % 512 !== 0) {
        return false
      }

      // read the last 1024 bytes of the file
      const buf = Buffer.allocUnsafe(1024)
      // NOTE(review): assumes the promisified `fs.read` resolves with the
      // number of bytes read — TODO confirm promise-toolbox's `promisifyAll`
      // behavior for multi-result callbacks.
      assert.strictEqual(
        await fs.read(fd, buf, 0, buf.length, size - buf.length),
        buf.length
      )
      return buf.every(_ => _ === 0)
    } finally {
      // best-effort close: errors are deliberately swallowed
      fs.close(fd).catch(noop)
    }
  } catch (error) {
    // never throw, log and report as valid to avoid side effects
    console.error('isValidTar', path, error)
    return true
  }
}
// no-op callback, used to swallow best-effort errors
const noop = Function.prototype

// List the entries of a directory, returned as full `<path>/<entry>` paths.
//
// A missing directory is treated as empty instead of raising an error; any
// other failure is propagated to the caller.
const readDir = async path => {
  let entries
  try {
    entries = await fs.readdir(path)
  } catch (error) {
    // a missing dir is by definition empty
    if (error != null && error.code === 'ENOENT') {
      return []
    }
    throw error
  }
  return entries.map(entry => `${path}/${entry}`)
}
// -----------------------------------------------------------------------------

// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
//
// NOTE(review): the actual merge is not implemented — when `force` is true
// this returns early after logging a TODO, so the rename/unlink block below
// only runs (as dry-run logging) when `force` is false.
async function mergeVhdChain(chain) {
  assert(chain.length >= 2)
  const child = chain[0]
  const parent = chain[chain.length - 1]
  // `children` runs from the VHD closest to the parent down to `child`
  const children = chain.slice(0, -1).reverse()

  console.warn('Unused parents of VHD', child)
  chain
    .slice(1)
    .reverse()
    .forEach(parent => {
      console.warn(' ', parent)
    })
  force && console.warn(' merging…')
  console.warn('')
  if (force) {
    // `mergeVhd` does not work with a stream, either
    // - make it accept a stream
    // - or create synthetic VHD which is not a stream
    return console.warn('TODO: implement merge')
    //    await mergeVhd(
    //      handler,
    //      parent,
    //      handler,
    //      children.length === 1
    //        ? child
    //        : await createSyntheticStream(handler, children)
    //    )
  }

  await Promise.all([
    force && fs.rename(parent, child),
    // `slice(0, -1)` skips `child` itself: the parent is renamed to that
    // path above, so it must not be deleted
    asyncMap(children.slice(0, -1), child => {
      console.warn('Unused VHD', child)
      force && console.warn(' deleting…')
      console.warn('')
      return force && handler.unlink(child)
    }),
  ])
}
// List every VHD file path of a VM backup directory.
//
// Walks two directory levels under `<vmDir>/vdis` and keeps only the entries
// ending in `.vhd`.
async function listVhds(vmDir) {
  const firstLevel = await readDir(vmDir + '/vdis')
  const secondLevel = flatten(await asyncMap(firstLevel, readDir))
  const files = flatten(await asyncMap(secondLevel, readDir))
  return files.filter(entry => entry.endsWith('.vhd'))
}
// Check and clean up a single VM backup directory.
//
// Every destructive action is gated behind the global `force` flag; without
// it the function only logs what it would do. Steps:
// 1. detect (and optionally delete) broken VHDs, indexing the parent/child
//    relationships of the valid ones
// 2. delete VHDs whose ancestor chain is broken (missing parent)
// 3. report XVAs that do not look like valid tar archives (report only)
// 4. delete backup metadata (JSON) referencing a missing XVA/VHD, and
//    compute which XVAs/VHDs are referenced by no metadata
// 5. delete unused VHDs, or merge chains that end in a used VHD, and delete
//    unused XVAs
async function handleVm(vmDir) {
  const vhds = new Set()
  const vhdParents = { __proto__: null } // child path → parent path
  const vhdChildren = { __proto__: null } // parent path → child path

  // remove broken VHDs
  await asyncMap(await listVhds(vmDir), async path => {
    try {
      const vhd = new Vhd(handler, path)
      await vhd.readHeaderAndFooter()
      vhds.add(path)
      if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
        const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
        vhdParents[path] = parent
        if (parent in vhdChildren) {
          const error = new Error(
            'this script does not support multiple VHD children'
          )
          error.parent = parent
          error.child1 = vhdChildren[parent]
          error.child2 = path
          throw error // should we throw?
        }
        vhdChildren[parent] = path
      }
    } catch (error) {
      console.warn('Error while checking VHD', path)
      console.warn(' ', error)
      // only delete when the VHD failed its internal assertions (i.e. it is
      // actually corrupted), not on arbitrary I/O errors
      if (error != null && error.code === 'ERR_ASSERTION') {
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(path))
      }
    }
  })

  // remove VHDs with missing ancestors
  {
    const deletions = []

    // delete `vhd` if any of its ancestors is missing
    //
    // NOTE(review): despite being named like a predicate, this returns no
    // value — callers rely only on its side effects.
    const deleteIfOrphan = vhd => {
      const parent = vhdParents[vhd]
      if (parent === undefined) {
        return
      }

      // no longer needs to be checked
      delete vhdParents[vhd]

      deleteIfOrphan(parent)

      if (!vhds.has(parent)) {
        vhds.delete(vhd)

        console.warn('Error while checking VHD', vhd)
        console.warn(' missing parent', parent)
        force && console.warn(' deleting…')
        console.warn('')
        force && deletions.push(handler.unlink(vhd))
      }
    }

    // > A property that is deleted before it has been visited will not be
    // > visited later.
    // >
    // > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
    for (const child in vhdParents) {
      deleteIfOrphan(child)
    }

    await Promise.all(deletions)
  }

  const [jsons, xvas] = await readDir(vmDir).then(entries => [
    entries.filter(_ => _.endsWith('.json')),
    new Set(entries.filter(_ => _.endsWith('.xva'))),
  ])

  await asyncMap(xvas, async path => {
    // check is not good enough to delete the file, the best we can do is report
    // it
    if (!(await isValidTar(path))) {
      console.warn('Potential broken XVA', path)
      console.warn('')
    }
  })

  const unusedVhds = new Set(vhds)
  const unusedXvas = new Set(xvas)

  // compile the list of unused XVAs and VHDs, and remove backup metadata which
  // reference a missing XVA/VHD
  await asyncMap(jsons, async json => {
    const metadata = JSON.parse(await fs.readFile(json))
    const { mode } = metadata
    if (mode === 'full') {
      const linkedXva = resolve(vmDir, metadata.xva)

      if (xvas.has(linkedXva)) {
        unusedXvas.delete(linkedXva)
      } else {
        console.warn('Error while checking backup', json)
        console.warn(' missing file', linkedXva)
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    } else if (mode === 'delta') {
      const linkedVhds = (() => {
        const { vhds } = metadata
        return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
      })()

      // FIXME: find better approach by keeping as much of the backup as
      // possible (existing disks) even if one disk is missing
      if (linkedVhds.every(_ => vhds.has(_))) {
        linkedVhds.forEach(_ => unusedVhds.delete(_))
      } else {
        console.warn('Error while checking backup', json)
        const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
        console.warn(
          ' %i/%i missing VHDs',
          missingVhds.length,
          linkedVhds.length
        )
        missingVhds.forEach(vhd => {
          console.warn(' ', vhd)
        })
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    }
  })

  // TODO: parallelize by vm/job/vdi
  const unusedVhdsDeletion = []
  {
    // VHD chains (as list from child to ancestor) to merge indexed by last
    // ancestor
    const vhdChainsToMerge = { __proto__: null }

    const toCheck = new Set(unusedVhds)

    // walk up from `vhd`: if some descendant is still used, return the chain
    // (child first) to merge; otherwise log/delete the unused VHD
    const getUsedChildChainOrDelete = vhd => {
      if (vhd in vhdChainsToMerge) {
        // `vhd` is the deepest ancestor of an already-visited chain: take
        // over that chain and extend it
        const chain = vhdChainsToMerge[vhd]
        delete vhdChainsToMerge[vhd]
        return chain
      }

      if (!unusedVhds.has(vhd)) {
        return [vhd]
      }

      // no longer needs to be checked
      toCheck.delete(vhd)

      const child = vhdChildren[vhd]
      if (child !== undefined) {
        const chain = getUsedChildChainOrDelete(child)
        if (chain !== undefined) {
          chain.push(vhd)
          return chain
        }
      }

      console.warn('Unused VHD', vhd)
      force && console.warn(' deleting…')
      console.warn('')
      force && unusedVhdsDeletion.push(handler.unlink(vhd))
    }

    toCheck.forEach(vhd => {
      vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
    })

    Object.keys(vhdChainsToMerge).forEach(key => {
      const chain = vhdChainsToMerge[key]
      if (chain !== undefined) {
        unusedVhdsDeletion.push(mergeVhdChain(chain))
      }
    })
  }

  await Promise.all([
    // FIX: the promises must be spread into the array — passing the array
    // itself as a single element meant `Promise.all` resolved immediately
    // without awaiting the deletions/merges (and left their rejections
    // unhandled), since arrays are not thenables
    ...unusedVhdsDeletion,
    asyncMap(unusedXvas, path => {
      console.warn('Unused XVA', path)
      force && console.warn(' deleting…')
      console.warn('')
      return force && handler.unlink(path)
    }),
  ])
}
// -----------------------------------------------------------------------------

// Main: process every VM directory given on the command line, in parallel,
// holding an exclusive lock on each directory while it is handled so two
// runs (or a concurrent backup) cannot work on the same directory.
asyncMap(args, async vmDir => {
  vmDir = resolve(vmDir)

  // TODO: implement this in `xo-server`, not easy because not compatible with
  // `@xen-orchestra/fs`.
  const release = await lockfile.lock(vmDir)
  try {
    await handleVm(vmDir)
  } catch (error) {
    // per-directory errors are reported but do not abort the other directories
    console.error('handleVm', vmDir, error)
  } finally {
    await release()
  }
}).catch(error => console.error('main', error))

View File

@@ -1,27 +0,0 @@
{
"bin": {
"xo-backups": "index.js"
},
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/fs": "^0.10.1",
"lodash": "^4.17.15",
"promise-toolbox": "^0.14.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.7.0"
},
"engines": {
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
"repository": {
"directory": "@xen-orchestra/backups-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.6",
"version": "1.0.4",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -46,7 +46,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -5,21 +5,14 @@ import parse from './parse'
const MAX_DELAY = 2 ** 31 - 1
function nextDelay(schedule) {
const now = schedule._createDate()
return next(schedule._schedule, now) - now
}
class Job {
constructor(schedule, fn) {
let scheduledDate
const wrapper = () => {
const now = Date.now()
if (scheduledDate > now) {
// we're early, delay
//
// no need to check _isEnabled, we're just delaying the existing timeout
//
// see https://github.com/vatesfr/xen-orchestra/issues/4625
this._timeout = setTimeout(wrapper, scheduledDate - now)
return
}
this._isRunning = true
let result
@@ -39,9 +32,7 @@ class Job {
this._isRunning = false
if (this._isEnabled) {
const now = schedule._createDate()
scheduledDate = +next(schedule._schedule, now)
const delay = scheduledDate - now
const delay = nextDelay(schedule)
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)

View File

@@ -34,7 +34,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -33,7 +33,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -30,7 +30,7 @@
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
@@ -46,7 +46,7 @@
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"

View File

@@ -31,14 +31,14 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
},
@@ -48,7 +48,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-dev": "^1.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -28,7 +28,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -2,59 +2,20 @@
## **next**
### Breaking changes
- `xo-server` requires Node 8.
### Enhancements
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
### Bug fixes
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
### Released packages
- @xen-orchestra/cron v1.0.5
- xo-server-transport-icinga2 v0.1.0
- xo-server-sdn-controller v0.3.1
- xo-server v5.51.0
- xo-web v5.51.0
### Dropped packages
- xo-server-cloud : this package was useless for OpenSource installations because it required a complete XOA environment
## **5.39.1** (2019-10-11)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
### Bug fixes
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
### Released packages
- xo-web v5.50.3
## **5.39.0** (2019-09-30)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Highlights
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))

View File

@@ -7,11 +7,13 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [HUB] Display template description (PR [#4575](https://github.com/vatesfr/xen-orchestra/pull/4575))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
### Released packages
@@ -20,6 +22,5 @@
>
> Rule of thumb: add packages on top.
- @xen-orchestra/cron v1.0.6
- xo-server v5.52.0
- xo-web v5.52.0
- xo-server v5.51.0
- xo-web v5.51.0

Binary file not shown.

Before

Width:  |  Height:  |  Size: 99 KiB

View File

@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
```
$ node -v
v8.16.2
v8.12.0
```
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.

View File

@@ -41,20 +41,6 @@ However, if you want to start a manual check, you can do it by clicking on the "
![](./assets/xo5updatebutton.png)
#### Release channel
In Xen Orchestra, you can make a choice between two different release channels.
##### Stable
The stable channel is intended to be a version of Xen Orchestra that is already **one month old** (and therefore will benefit from one month of community feedback and various fixes). This way, users more concerned with the stability of their appliance will have the option to stay on a slightly older (and tested) version of XO (still supported by our pro support).
##### Latest
The latest channel will include all the latest improvements available in Xen Orchestra. The version available in latest has already been QA'd by our team, but issues may still occur once it is deployed in the vastly varying environments found across our user base.
> To select the release channel of your choice, go to the XOA > Updates view.
![](./assets/release-channels.png)
#### Upgrade
If a new version is found, you'll have an upgrade button and its tooltip displayed:

View File

@@ -12,18 +12,18 @@
"eslint-config-standard-jsx": "^8.1.0",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^10.0.0",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.109.0",
"flow-bin": "^0.106.3",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"sorted-object": "^2.0.1"
},
"engines": {

View File

@@ -35,7 +35,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.1",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -33,7 +33,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -39,10 +39,10 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"rimraf": "^3.0.0",
"tmp": "^0.1.0"
},

View File

@@ -21,13 +21,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -38,7 +37,7 @@
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",

View File

@@ -1,5 +1,4 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
@@ -14,23 +13,18 @@ import {
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const { warn } = createLogger('vhd-lib:createSyntheticStream')
export default async function createSyntheticStream(handler, paths) {
export default async function createSyntheticStream(handler, path) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
console.warn('createReadStream, closeFd', i, error)
})
}
}
try {
const vhds = []
const open = async path => {
while (true) {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
@@ -38,18 +32,11 @@ export default async function createSyntheticStream(handler, paths) {
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
return vhd
}
if (typeof paths === 'string') {
let path = paths
let vhd
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
} else {
for (const path of paths) {
await open(path)
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
@@ -16,7 +15,10 @@ import {
SECTOR_SIZE,
} from './_constants'
const { debug } = createLogger('vhd-lib:Vhd')
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//

View File

@@ -48,7 +48,7 @@
"@babel/core": "^7.1.5",
"@babel/preset-env": "^7.1.5",
"babel-plugin-lodash": "^3.2.11",
"cross-env": "^6.0.3",
"cross-env": "^5.1.4",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -46,7 +46,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -60,7 +60,7 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find } from 'lodash'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
@@ -110,7 +110,7 @@ const main = async args => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
}).then(value => (isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)

View File

@@ -4,7 +4,7 @@ import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
import { isArray, map, noop, omit } from 'lodash'
import {
cancelable,
defer,
@@ -113,7 +113,7 @@ export class Xapi extends EventEmitter {
this._watchedTypes = undefined
const { watchEvents } = opts
if (watchEvents !== false) {
if (Array.isArray(watchEvents)) {
if (isArray(watchEvents)) {
this._watchedTypes = watchEvents
}
this.watchEvents()
@@ -1075,7 +1075,7 @@ export class Xapi extends EventEmitter {
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
const value = data[field]
if (Array.isArray(value)) {
if (isArray(value)) {
if (value.length === 0 || isOpaqueRef(value[0])) {
getters[$field] = function() {
const value = this[field]

View File

@@ -38,16 +38,16 @@
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"micromatch": "^3.1.3",
"mkdirp": "^0.5.1",
"nice-pipe": "0.0.0",
"pretty-ms": "^5.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"strip-indent": "^2.0.0",
"xdg-basedir": "^3.0.0",
"xo-lib": "^0.9.0"
},
"devDependencies": {
@@ -56,7 +56,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -7,6 +7,7 @@ const promisify = require('bluebird').promisify
const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)
const assign = require('lodash/assign')
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const xdgBasedir = require('xdg-basedir')
@@ -40,7 +41,7 @@ const save = (exports.save = function(config) {
exports.set = function(data) {
return load().then(function(config) {
return save(Object.assign(config, data))
return save(assign(config, data))
})
}

View File

@@ -17,6 +17,7 @@ const getKeys = require('lodash/keys')
const hrp = require('http-request-plus').default
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isArray = require('lodash/isArray')
const isObject = require('lodash/isObject')
const micromatch = require('micromatch')
const nicePipe = require('nice-pipe')
@@ -297,11 +298,7 @@ async function listCommands(args) {
str.push(
name,
'=<',
type == null
? 'unknown type'
: Array.isArray(type)
? type.join('|')
: type,
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
'>'
)

View File

@@ -34,7 +34,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"rimraf": "^3.0.0"
},

View File

@@ -36,7 +36,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,5 +1,5 @@
import { BaseError } from 'make-error'
import { iteratee } from 'lodash'
import { isArray, iteratee } from 'lodash'
class XoError extends BaseError {
constructor({ code, message, data }) {
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
}))
export const invalidParameters = create(10, (message, errors) => {
if (Array.isArray(message)) {
if (isArray(message)) {
errors = message
message = undefined
}

View File

@@ -41,7 +41,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -32,7 +32,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"deep-freeze": "^0.0.1",
"rimraf": "^3.0.0"
},

View File

@@ -40,7 +40,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -39,14 +39,14 @@
"inquirer": "^7.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -40,7 +40,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.6.1",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -48,7 +48,7 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -354,7 +354,7 @@ class BackupReportsXoPlugin {
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
@@ -390,7 +390,7 @@ class BackupReportsXoPlugin {
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
@@ -646,7 +646,7 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
@@ -656,7 +656,7 @@ class BackupReportsXoPlugin {
})
}
_sendReport({ markdown, subject, success, nagiosMarkdown }) {
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
@@ -676,14 +676,9 @@ class BackupReportsXoPlugin {
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: success ? 0 : 2,
status: nagiosStatus,
message: nagiosMarkdown,
}),
xo.sendIcinga2Status !== undefined &&
xo.sendIcinga2Status({
status: success ? 'OK' : 'CRITICAL',
message: markdown,
}),
])
}
@@ -713,7 +708,7 @@ class BackupReportsXoPlugin {
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}
@@ -909,7 +904,7 @@ class BackupReportsXoPlugin {
? ICON_FAILURE
: ICON_SKIPPED
}`,
success: globalSuccess,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${

View File

@@ -0,0 +1,10 @@
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/test/
/tests/
*.spec.js
*.spec.js.map

View File

@@ -1,6 +1,4 @@
# xo-server-transport-icinga2 [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
> xo-server plugin to send status to icinga2 server
# xo-server-cloud [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
## Install
@@ -13,13 +11,6 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs
## Development
### `Xo#sendIcinga2Status({ status, message })`
This xo method is called to send a passive check to icinga2 and change the status of a service.
It has two parameters:
- status: it's the service status in icinga2 (0: OK | 1: WARNING | 2: CRITICAL | 3: UNKNOWN).
- message: it's the status information in icinga2.
```
# Install dependencies
> npm install

View File

@@ -0,0 +1,54 @@
{
"name": "xo-server-cloud",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [
"cloud",
"orchestra",
"plugin",
"xen",
"xen-orchestra",
"xo-server"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-cloud",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Pierre Donias",
"email": "pierre.donias@gmail.com"
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.5.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}

View File

@@ -0,0 +1,208 @@
import Client, { createBackoff } from 'jsonrpc-websocket-client'
import hrp from 'http-request-plus'
const WS_URL = 'ws://localhost:9001'
const HTTP_URL = 'http://localhost:9002'
// ===================================================================
class XoServerCloud {
constructor({ xo }) {
this._xo = xo
// Defined in configure().
this._conf = null
this._key = null
}
configure(configuration) {
this._conf = configuration
}
async load() {
const getResourceCatalog = this._getCatalog.bind(this)
getResourceCatalog.description =
"Get the list of user's available resources"
getResourceCatalog.permission = 'admin'
getResourceCatalog.params = {
filters: { type: 'object', optional: true },
}
const registerResource = ({ namespace }) =>
this._registerResource(namespace)
registerResource.description = 'Register a resource via cloud plugin'
registerResource.params = {
namespace: {
type: 'string',
},
}
registerResource.permission = 'admin'
const downloadAndInstallResource = this._downloadAndInstallResource.bind(
this
)
downloadAndInstallResource.description =
'Download and install a resource via cloud plugin'
downloadAndInstallResource.params = {
id: { type: 'string' },
namespace: { type: 'string' },
version: { type: 'string' },
sr: { type: 'string' },
}
downloadAndInstallResource.resolve = {
sr: ['sr', 'SR', 'administrate'],
}
downloadAndInstallResource.permission = 'admin'
this._unsetApiMethods = this._xo.addApiMethods({
cloud: {
downloadAndInstallResource,
getResourceCatalog,
registerResource,
},
})
this._unsetRequestResource = this._xo.defineProperty(
'requestResource',
this._requestResource,
this
)
const updater = (this._updater = new Client(WS_URL))
const connect = () =>
updater.open(createBackoff()).catch(error => {
console.error('xo-server-cloud: fail to connect to updater', error)
return connect()
})
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
console.warn('xo-server-cloud: next attempt in %s ms', delay)
})
connect()
}
unload() {
this._unsetApiMethods()
this._unsetRequestResource()
}
// ----------------------------------------------------------------
async _getCatalog({ filters } = {}) {
const catalog = await this._updater.call('getResourceCatalog', { filters })
if (!catalog) {
throw new Error('cannot get catalog')
}
return catalog
}
// ----------------------------------------------------------------
async _getNamespaces() {
const catalog = await this._getCatalog()
if (!catalog._namespaces) {
throw new Error('cannot get namespaces')
}
return catalog._namespaces
}
// ----------------------------------------------------------------
async _downloadAndInstallResource({ id, namespace, sr, version }) {
const stream = await this._requestResource({
hub: true,
id,
namespace,
version,
})
const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
srId: sr.id,
type: 'xva',
})
await vm.update_other_config({
'xo:resource:namespace': namespace,
'xo:resource:xva:version': version,
'xo:resource:xva:id': id,
})
}
// ----------------------------------------------------------------
async _registerResource(namespace) {
const _namespace = (await this._getNamespaces())[namespace]
if (_namespace === undefined) {
throw new Error(`${namespace} is not available`)
}
if (_namespace.registered || _namespace.pending) {
throw new Error(`already registered for ${namespace}`)
}
return this._updater.call('registerResource', { namespace })
}
// ----------------------------------------------------------------
async _getNamespaceCatalog({ hub, namespace }) {
const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
namespace
]
if (!namespaceCatalog) {
throw new Error(`cannot get catalog: ${namespace} not registered`)
}
return namespaceCatalog
}
// ----------------------------------------------------------------
async _requestResource({ hub = false, id, namespace, version }) {
const _namespace = (await this._getNamespaces())[namespace]
if (!hub && (!_namespace || !_namespace.registered)) {
throw new Error(`cannot get resource: ${namespace} not registered`)
}
const { _token: token } = await this._getNamespaceCatalog({
hub,
namespace,
})
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
if (token === undefined) {
throw new Error(`${namespace} namespace token is undefined`)
}
const downloadToken = await this._updater.call('getResourceDownloadToken', {
token,
id,
version,
})
if (!downloadToken) {
throw new Error('cannot get download token')
}
const response = await hrp(HTTP_URL, {
headers: {
Authorization: `Bearer ${downloadToken}`,
},
})
// currently needed for XenApi#putResource()
response.length = response.headers['content-length']
return response
}
}
export default opts => new XoServerCloud(opts)

View File

@@ -31,7 +31,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"lodash": "^4.16.2"
},
"devDependencies": {

View File

@@ -21,7 +21,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"
@@ -32,7 +32,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,6 +1,6 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const COMPARATOR_FN = {
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
Object.assign(result, {
assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
definition.alarmTriggerLevel
)
const data = getter(result.object)
Object.assign(result, {
assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})
@@ -680,7 +680,7 @@ ${entry.listItem}
},
}
if (xapiObject.$type === 'VM') {
payload.vm_uuid = xapiObject.uuid
payload['vm_uuid'] = xapiObject.uuid
}
// JSON is not well formed, can't use the default node parser
return JSON5.parse(

View File

@@ -15,7 +15,7 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.3.1",
"version": "0.3.0",
"engines": {
"node": ">=6"
},
@@ -25,13 +25,13 @@
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^6.0.3"
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.98",
"promise-toolbox": "^0.14.0",
"node-openssl-cert": "^0.0.97",
"promise-toolbox": "^0.13.0",
"uuid": "^3.3.2"
},
"private": true

File diff suppressed because it is too large Load Diff

View File

@@ -28,7 +28,8 @@ export class OvsdbClient {
Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
- `other_config`:
- `xo:sdn-controller:private-network-uuid`: UUID of the private network
- `xo:sdn-controller:cross-pool` : UUID of the remote network connected by the tunnel
- `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller
Attributes on created OVS interfaces:
- `options`:
@@ -66,49 +67,55 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
network,
networkUuid,
networkName,
remoteAddress,
encapsulation,
key,
password,
privateNetworkUuid
remoteNetwork
) {
if (
this._adding.find(
elem => elem.id === network.uuid && elem.addr === remoteAddress
elem => elem.id === networkUuid && elem.addr === remoteAddress
) !== undefined
) {
return
}
const adding = { id: network.uuid, addr: remoteAddress }
const adding = { id: networkUuid, addr: remoteAddress }
this._adding.push(adding)
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridge,
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return bridge.name
return bridgeName
}
const index = ++this._numberOfPortAndInterface
const interfaceName = bridge.name + '_iface' + index
const portName = bridge.name + '_port' + index
const interfaceName = bridgeName + '_iface' + index
const portName = bridgeName + '_port' + index
// Add interface and port to the bridge
const options = { remote_ip: remoteAddress, key: key }
@@ -132,9 +139,11 @@ export class OvsdbClient {
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: toMap({
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
}),
other_config: toMap(
remoteNetwork !== undefined
? { 'xo:sdn-controller:cross-pool': remoteNetwork }
: { 'xo:sdn-controller:private-pool-wide': 'true' }
),
},
'uuid-name': 'new_port',
}
@@ -142,7 +151,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
@@ -154,7 +163,7 @@ export class OvsdbClient {
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
if (jsonObjects === undefined) {
socket.destroy()
@@ -180,8 +189,8 @@ export class OvsdbClient {
details,
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
@@ -191,24 +200,33 @@ export class OvsdbClient {
log.debug('Port and interface added to bridge', {
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
return bridge.name
return bridgeName
}
async resetForNetwork(network, privateNetworkUuid) {
async resetForNetwork(
networkUuid,
networkName,
crossPoolOnly,
remoteNetwork
) {
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridge, socket)
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
socket.destroy()
return
@@ -232,14 +250,15 @@ export class OvsdbClient {
// 2019-09-03
// Compatibility code, to be removed in 1 year.
const oldShouldDelete =
config[0] === 'private_pool_wide' ||
config[0] === 'cross_pool' ||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
config[0] === 'xo:sdn-controller:cross-pool'
(config[0] === 'private_pool_wide' && !crossPoolOnly) ||
(config[0] === 'cross_pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
const shouldDelete =
config[0] === 'xo:sdn-controller:private-network-uuid' &&
config[1] === privateNetworkUuid
(config[0] === 'xo:sdn-controller:private-pool-wide' &&
!crossPoolOnly) ||
(config[0] === 'xo:sdn-controller:cross-pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
if (shouldDelete || oldShouldDelete) {
portsToDelete.push(['uuid', portUuid])
@@ -256,7 +275,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
@@ -269,7 +288,7 @@ export class OvsdbClient {
if (jsonObjects[0].error != null) {
log.error('Error while deleting ports from bridge', {
error: jsonObjects[0].error,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -278,7 +297,7 @@ export class OvsdbClient {
log.debug('Ports deleted from bridge', {
nPorts: jsonObjects[0].result[0].count,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -316,9 +335,9 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _getBridgeForNetwork(network, socket) {
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
]
const selectResult = await this._select(
'Bridge',
@@ -328,17 +347,25 @@ export class OvsdbClient {
)
if (selectResult === undefined) {
log.error('No bridge found for network', {
network: network.name_label,
network: networkName,
host: this.host.name_label,
})
return {}
return []
}
return { uuid: selectResult._uuid[1], name: selectResult.name }
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
const ports = await this._getBridgePorts(bridge, socket)
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
return false
}
@@ -366,8 +393,8 @@ export class OvsdbClient {
return false
}
async _getBridgePorts(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult === undefined) {
return

View File

@@ -1,202 +0,0 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:private-network')
// =============================================================================
const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
Array.from({ length: 16 }, _ => sample(CHARS)).join('')
// =============================================================================
export class PrivateNetwork {
constructor(controller, uuid) {
this.controller = controller
this.uuid = uuid
this.networks = {}
}
// ---------------------------------------------------------------------------
async addHost(host) {
if (host.$ref === this.center?.$ref) {
// Nothing to do
return
}
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
log.error('No OVSDB client found', {
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
const centerClient = this.controller.ovsdbClients[this.center.$ref]
if (centerClient === undefined) {
log.error('No OVSDB client found for star-center', {
privateNetwork: this.uuid,
host: this.center.name_label,
pool: this.center.$pool.name_label,
})
return
}
const network = this.networks[host.$pool.uuid]
const centerNetwork = this.networks[this.center.$pool.uuid]
const otherConfig = network.other_config
const encapsulation =
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
const password =
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
])
} catch (error) {
log.error('Error while connecting host to private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
log.info('Host added', {
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return bridgeName
}
addNetwork(network) {
this.networks[network.$pool.uuid] = network
log.info('Adding network', {
privateNetwork: this.uuid,
network: network.name_label,
pool: network.$pool.name_label,
})
if (this.center === undefined) {
return this.electNewCenter()
}
const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
return Promise.all(
map(hosts, async host => {
const hostClient = this.controller.ovsdbClients[host.$ref]
const network = this.networks[host.$pool.uuid]
await hostClient.resetForNetwork(network, this.uuid)
await this.addHost(host)
})
)
}
async electNewCenter() {
delete this.center
// TODO: make it random
const hosts = this._getHosts()
for (const host of hosts) {
const pif = find(host.$PIFs, {
network: this.networks[host.$pool.uuid].$ref,
})
if (pif?.currently_attached && host.$metrics.live) {
this.center = host
break
}
}
if (this.center === undefined) {
log.error('No available host to elect new star-center', {
privateNetwork: this.uuid,
})
return
}
await this._reset()
// Recreate star topology
await Promise.all(map(hosts, host => this.addHost(host)))
log.info('New star-center elected', {
center: this.center.name_label,
privateNetwork: this.uuid,
})
}
// ---------------------------------------------------------------------------
getPools() {
const pools = []
forOwn(this.networks, network => {
pools.push(network.$pool)
})
return pools
}
// ---------------------------------------------------------------------------
_reset() {
return Promise.all(
map(this._getHosts(), async host => {
// Clean old ports and interfaces
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
return
}
const network = this.networks[host.$pool.uuid]
try {
await hostClient.resetForNetwork(network, this.uuid)
} catch (error) {
log.error('Error while resetting private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: network.$pool.name_label,
})
}
})
)
}
// ---------------------------------------------------------------------------
_getHosts() {
const hosts = []
forOwn(this.networks, network => {
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
})
return hosts
}
}

View File

@@ -36,7 +36,7 @@
"golike-defer": "^0.4.1",
"jest": "^24.8.0",
"lodash": "^4.17.11",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-lib": "^0.9.0"

View File

@@ -14,7 +14,6 @@
[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''
[templates]

View File

@@ -1,6 +0,0 @@
export const getDefaultName = () => `xo-server-test ${new Date().toISOString()}`
export const getDefaultSchedule = () => ({
name: getDefaultName(),
cron: '0 * * * * *',
})

View File

@@ -2,11 +2,15 @@
import defer from 'golike-defer'
import Xo from 'xo-lib'
import XoCollection from 'xo-collection'
import { defaultsDeep, find, forOwn, pick } from 'lodash'
import { find, forOwn } from 'lodash'
import { fromEvent } from 'promise-toolbox'
import config from './_config'
import { getDefaultName } from './_defaultValues'
const getDefaultCredentials = () => {
const { email, password } = config.xoConnection
return { email, password }
}
class XoConnection extends Xo {
constructor(opts) {
@@ -68,10 +72,7 @@ class XoConnection extends Xo {
}
@defer
async connect(
$defer,
credentials = pick(config.xoConnection, 'email', 'password')
) {
async connect($defer, credentials = getDefaultCredentials()) {
await this.open()
$defer.onFailure(() => this.close())
@@ -110,26 +111,9 @@ class XoConnection extends Xo {
}
async createTempBackupNgJob(params) {
// mutate and inject default values
defaultsDeep(params, {
mode: 'full',
name: getDefaultName(),
settings: {
'': {
// it must be enabled because the XAPI might be not able to coalesce VDIs
// as fast as the tests run
//
// see https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
bypassVdiChainsCheck: true,
// it must be 'never' to avoid race conditions with the plugin `backup-reports`
reportWhen: 'never',
},
},
})
const id = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id })
return this.call('backupNg.getJob', { id })
const job = await this.call('backupNg.createJob', params)
this._tempResourceDisposers.push('backupNg.deleteJob', { id: job.id })
return job
}
async createTempNetwork(params) {
@@ -144,7 +128,7 @@ class XoConnection extends Xo {
async createTempVm(params) {
const id = await this.call('vm.create', {
name_label: getDefaultName(),
name_label: 'XO Test',
template: config.templates.templateWithoutDisks,
...params,
})
@@ -154,19 +138,6 @@ class XoConnection extends Xo {
})
}
async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })
return this.waitObjectState(id, vm => {
if (
vm.power_state !== 'Running' ||
(withXenTools && vm.xenTools === false)
) {
throw new Error('retry')
}
})
}
async createTempRemote(params) {
const remote = await this.call('remote.create', params)
this._tempResourceDisposers.push('remote.delete', { id: remote.id })

View File

@@ -1,6 +1,61 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`backupNg .createJob() : creates a new backup job with schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Any<Object>,
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;
exports[`backupNg .createJob() : creates a new backup job with schedules 2`] = `
Object {
"cron": "0 * * * * *",
"enabled": false,
"id": Any<String>,
"jobId": Any<String>,
"name": "scheduleTest",
}
`;
exports[`backupNg .createJob() : creates a new backup job without schedules 1`] = `
Object {
"id": Any<String>,
"mode": "full",
"name": "default-backupNg",
"settings": Object {
"": Object {
"reportWhen": "never",
},
},
"type": "backup",
"userId": Any<String>,
"vms": Any<Object>,
}
`;
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "skipped",
}
`;
exports[`backupNg .runJob() : fails trying to run a backup job with a VM without disks 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -37,6 +92,23 @@ Array [
exports[`backupNg .runJob() : fails trying to run a backup job without schedule 1`] = `[JsonRpcError: invalid parameters]`;
exports[`backupNg .runJob() : fails trying to run backup job without retentions 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "failure",
}
`;
exports[`backupNg .runJob() : fails trying to run backup job without retentions 2`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -55,83 +127,37 @@ Object {
}
`;
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 2`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -142,7 +168,7 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 3`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -157,19 +183,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 4`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 5`] = `
Object {
"end": Any<Number>,
@@ -184,6 +197,19 @@ Object {
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 6`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -198,19 +224,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 7`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 8`] = `
Object {
"end": Any<Number>,
@@ -226,13 +239,12 @@ Object {
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 9`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
@@ -240,10 +252,15 @@ Object {
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 10`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"message": "snapshot",
"result": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
@@ -253,8 +270,7 @@ exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retent
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
@@ -268,29 +284,14 @@ exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retent
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"message": "snapshot",
"result": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 13`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -305,6 +306,19 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 14`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 15`] = `
Object {
"end": Any<Number>,
@@ -319,6 +333,21 @@ Object {
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 16`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": false,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -331,7 +360,36 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 17`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"mode": "delta",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"data": Object {
"id": Any<String>,
@@ -345,7 +403,7 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 18`] = `
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -356,47 +414,6 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 19`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 20`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 21`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 22`] = `
Object {
"data": Object {
@@ -438,7 +455,65 @@ Object {
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 25`] = `
Object {
"data": Object {
"id": Any<String>,
"isFull": true,
"type": "remote",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 26`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 27`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 1`] = `
Object {
"data": Object {
"mode": "full",
"reportWhen": "never",
},
"end": Any<Number>,
"id": Any<String>,
"jobId": Any<String>,
"jobName": "default-backupNg",
"message": "backup",
"scheduleId": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
@@ -449,7 +524,7 @@ Object {
}
`;
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 2`] = `
exports[`backupNg execute three times a rolling snapshot with 2 as retention & revert to an old state 3`] = `
Object {
"data": Object {
"id": Any<String>,

View File

@@ -6,44 +6,20 @@ import { noSuchObject } from 'xo-common/api-errors'
import config from '../_config'
import randomId from '../_randomId'
import xo from '../_xoConnection'
import { getDefaultName, getDefaultSchedule } from '../_defaultValues'
const validateBackupJob = (jobInput, jobOutput, createdSchedule) => {
const expectedObj = {
id: expect.any(String),
mode: jobInput.mode,
name: jobInput.name,
type: 'backup',
settings: {
'': jobInput.settings[''],
},
userId: xo._user.id,
vms: jobInput.vms,
}
const schedules = jobInput.schedules
if (schedules !== undefined) {
const scheduleTmpId = Object.keys(schedules)[0]
expect(createdSchedule).toEqual({
...schedules[scheduleTmpId],
enabled: false,
id: expect.any(String),
jobId: jobOutput.id,
})
expectedObj.settings[createdSchedule.id] = jobInput.settings[scheduleTmpId]
}
expect(jobOutput).toEqual(expectedObj)
const DEFAULT_SCHEDULE = {
name: 'scheduleTest',
cron: '0 * * * * *',
}
const validateRootTask = (log, expected) =>
expect(log).toEqual({
const validateRootTask = (log, props) =>
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
message: 'backup',
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
...expected,
...props,
})
const validateVmTask = (task, vmId, props) => {
@@ -90,55 +66,88 @@ const validateOperationTask = (task, props) => {
})
}
// Note: `bypassVdiChainsCheck` must be enabled because the XAPI might be not
// able to coalesce VDIs as fast as the tests run.
//
// See https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection
describe('backupNg', () => {
let defaultBackupNg
beforeAll(() => {
defaultBackupNg = {
name: 'default-backupNg',
mode: 'full',
vms: {
id: config.vms.default,
},
settings: {
'': {
reportWhen: 'never',
},
},
}
})
describe('.createJob() :', () => {
it('creates a new backup job without schedules', async () => {
const jobInput = {
mode: 'full',
vms: {
id: config.vms.default,
},
}
const jobOutput = await xo.createTempBackupNgJob(jobInput)
validateBackupJob(jobInput, jobOutput)
const backupNg = await xo.createTempBackupNgJob(defaultBackupNg)
expect(backupNg).toMatchSnapshot({
id: expect.any(String),
userId: expect.any(String),
vms: expect.any(Object),
})
expect(backupNg.vms).toEqual(defaultBackupNg.vms)
expect(backupNg.userId).toBe(xo._user.id)
})
it('creates a new backup job with schedules', async () => {
const scheduleTempId = randomId()
const jobInput = {
mode: 'full',
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
vms: {
id: config.vms.default,
},
}
const jobOutput = await xo.createTempBackupNgJob(jobInput)
validateBackupJob(
jobInput,
jobOutput,
await xo.getSchedule({ jobId: jobOutput.id })
)
})
const backupNgJob = await xo.call('backupNg.getJob', { id: jobId })
expect(backupNgJob).toMatchSnapshot({
id: expect.any(String),
userId: expect.any(String),
settings: expect.any(Object),
vms: expect.any(Object),
})
expect(backupNgJob.vms).toEqual(defaultBackupNg.vms)
expect(backupNgJob.userId).toBe(xo._user.id)
expect(Object.keys(backupNgJob.settings).length).toBe(2)
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
expect(backupNgJob.settings[schedule.id]).toEqual({
snapshotRetention: 1,
})
expect(schedule).toMatchSnapshot({
id: expect.any(String),
jobId: expect.any(String),
})
})
})
describe('.delete() :', () => {
it('deletes a backup job', async () => {
const scheduleTempId = randomId()
const jobId = await xo.call('backupNg.createJob', {
mode: 'full',
name: getDefaultName(),
vms: {
id: config.vms.default,
},
const { id: jobId } = await xo.call('backupNg.createJob', {
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
})
@@ -164,19 +173,16 @@ describe('backupNg', () => {
describe('.runJob() :', () => {
it('fails trying to run a backup job without schedule', async () => {
const { id } = await xo.createTempBackupNgJob({
vms: {
id: config.vms.default,
},
})
const { id } = await xo.createTempBackupNgJob(defaultBackupNg)
await expect(xo.call('backupNg.runJob', { id })).rejects.toMatchSnapshot()
})
it('fails trying to run a backup job with no matching VMs', async () => {
const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
[scheduleTempId]: { snapshotRetention: 1 },
@@ -199,8 +205,9 @@ describe('backupNg', () => {
jest.setTimeout(7e3)
const scheduleTempId = randomId()
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
[scheduleTempId]: { snapshotRetention: 1 },
@@ -224,23 +231,25 @@ describe('backupNg', () => {
jest.setTimeout(8e3)
await xo.createTempServer(config.servers.default)
const { id: vmIdWithoutDisks } = await xo.createTempVm({
name_label: 'XO Test Without Disks',
name_description: 'Creating a vm without disks',
template: config.templates.templateWithoutDisks,
})
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: { snapshotRetention: 1 },
},
vms: {
id: vmIdWithoutDisks,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -255,16 +264,12 @@ describe('backupNg', () => {
jobId,
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'skipped',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
expect(vmTask).toMatchSnapshot({
@@ -288,24 +293,22 @@ describe('backupNg', () => {
const scheduleTempId = randomId()
await xo.createTempServer(config.servers.default)
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
...defaultBackupNg.settings,
[scheduleTempId]: {},
},
srs: {
id: config.srs.default,
},
vms: {
id: config.vms.default,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -321,15 +324,12 @@ describe('backupNg', () => {
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'failure',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
expect(task).toMatchSnapshot({
@@ -352,6 +352,7 @@ describe('backupNg', () => {
jest.setTimeout(6e4)
await xo.createTempServer(config.servers.default)
let vm = await xo.createTempVm({
name_label: 'XO Test Temp',
name_description: 'Creating a temporary vm',
template: config.templates.default,
VDIs: [
@@ -364,18 +365,22 @@ describe('backupNg', () => {
})
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
...defaultBackupNg,
vms: {
id: vm.id,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
'': {
bypassVdiChainsCheck: true,
reportWhen: 'never',
},
[scheduleTempId]: { snapshotRetention: 2 },
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -415,15 +420,12 @@ describe('backupNg', () => {
scheduleId: schedule.id,
})
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
status: 'success',
expect(log).toMatchSnapshot({
end: expect.any(Number),
id: expect.any(String),
jobId: expect.any(String),
scheduleId: expect.any(String),
start: expect.any(Number),
})
const subTaskSnapshot = subTasks.find(
@@ -468,7 +470,7 @@ describe('backupNg', () => {
const exportRetention = 2
const fullInterval = 2
const scheduleTempId = randomId()
const jobInput = {
const { id: jobId } = await xo.createTempBackupNgJob({
mode: 'delta',
remotes: {
id: {
@@ -476,11 +478,13 @@ describe('backupNg', () => {
},
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
[scheduleTempId]: DEFAULT_SCHEDULE,
},
settings: {
'': {
bypassVdiChainsCheck: true,
fullInterval,
reportWhen: 'never',
},
[remoteId1]: { deleteFirst: true },
[scheduleTempId]: { exportRetention },
@@ -488,8 +492,7 @@ describe('backupNg', () => {
vms: {
id: vmToBackup,
},
}
const { id: jobId } = await xo.createTempBackupNgJob(jobInput)
})
const schedule = await xo.getSchedule({ jobId })
expect(typeof schedule).toBe('object')
@@ -512,12 +515,10 @@ describe('backupNg', () => {
backupLogs.forEach(({ tasks = [], ...log }, key) => {
validateRootTask(log, {
data: {
mode: jobInput.mode,
reportWhen: jobInput.settings[''].reportWhen,
mode: 'delta',
reportWhen: 'never',
},
jobId,
jobName: jobInput.name,
scheduleId: schedule.id,
message: 'backup',
status: 'success',
})
@@ -584,110 +585,4 @@ describe('backupNg', () => {
})
})
})
test('create and execute backup with enabled offline backup', async () => {
const vm = xo.objects.all[config.vms.withOsAndXenTools]
if (vm.power_state !== 'Running') {
await xo.startTempVm(vm.id, { force: true }, true)
}
const scheduleTempId = randomId()
const srId = config.srs.default
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const backupInput = {
mode: 'full',
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
offlineBackup: true,
},
[scheduleTempId]: {
copyRetention: 1,
exportRetention: 1,
},
},
srs: {
id: srId,
},
vms: {
id: vm.id,
},
}
const backup = await xo.createTempBackupNgJob(backupInput)
expect(backup.settings[''].offlineBackup).toBe(true)
const schedule = await xo.getSchedule({ jobId: backup.id })
await Promise.all([
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Halted') {
throw new Error('retry')
}
}),
])
await xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Running') {
throw new Error('retry')
}
})
const backupLogs = await xo.getBackupLogs({
jobId: backup.id,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(1)
const { tasks, ...log } = backupLogs[0]
validateRootTask(log, {
data: {
mode: backupInput.mode,
reportWhen: backupInput.settings[''].reportWhen,
},
jobId: backup.id,
jobName: backupInput.name,
scheduleId: schedule.id,
status: 'success',
})
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...vmTask }) => {
validateVmTask(vmTask, vm.id, { status: 'success' })
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...subTask }) => {
expect(subTask.message).not.toBe('snapshot')
if (subTask.message === 'export') {
validateExportTask(
subTask,
subTask.data.type === 'remote' ? remoteId : srId,
{
data: expect.any(Object),
status: 'success',
}
)
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(operationTask => {
if (
operationTask.message === 'transfer' ||
operationTask.message === 'merge'
) {
validateOperationTask(operationTask, {
result: { size: expect.any(Number) },
status: 'success',
})
}
})
}
})
})
}, 200e3)
})

View File

@@ -6,7 +6,7 @@ import expect from 'must'
// ===================================================================
import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map } from 'lodash'
import { map, assign } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -27,7 +27,7 @@ describe('disk', () => {
const config = await getConfig()
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -1,6 +1,6 @@
/* eslint-env jest */
import { find, map } from 'lodash'
import { assign, find, map } from 'lodash'
import { config, rejectionOf, xo } from './util'
@@ -151,7 +151,7 @@ describe('server', () => {
it('connects to a Xen server', async () => {
const serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
@@ -184,7 +184,7 @@ describe('server', () => {
let serverId
beforeEach(async () => {
serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
id: serverId,

View File

@@ -12,7 +12,7 @@ import {
getOneHost,
waitObjectState,
} from './util'
import { map } from 'lodash'
import { assign, map } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -33,7 +33,7 @@ describe('vbd', () => {
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -34,14 +34,14 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,32 +0,0 @@
{
"name": "xo-server-transport-icinga2",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-transport-icinga2",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-transport-icinga2",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "./dist",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.1.0",
"engines": {
"node": ">=8.9.4"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/preset-env": "^7.4.4",
"cross-env": "^6.0.3"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0"
},
"private": true
}

View File

@@ -1,136 +0,0 @@
import assert from 'assert'
import { URL } from 'url'
// =============================================================================
export const configurationSchema = {
type: 'object',
properties: {
server: {
type: 'string',
description: `
The icinga2 server http/https address.
*If no port is provided in the URL, 5665 will be used.*
Examples:
- https://icinga2.example.com
- http://192.168.0.1:1234
`.trim(),
},
user: {
type: 'string',
description: 'The icinga2 server username',
},
password: {
type: 'string',
description: 'The icinga2 server password',
},
filter: {
type: 'string',
description: `
The filter to use
See: https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/#filters
Example:
- Monitor the backup jobs of the VMs of a specific host:
\`host.name=="xoa.example.com" && service.name=="xo-backup"\`
`.trim(),
},
acceptUnauthorized: {
type: 'boolean',
description: 'Accept unauthorized certificates',
default: false,
},
},
additionalProperties: false,
required: ['server'],
}
// =============================================================================
const STATUS_MAP = {
OK: 0,
WARNING: 1,
CRITICAL: 2,
UNKNOWN: 3,
}
// =============================================================================
class XoServerIcinga2 {
constructor({ xo }) {
this._xo = xo
}
// ---------------------------------------------------------------------------
configure(configuration) {
const serverUrl = new URL(configuration.server)
if (configuration.user !== '') {
serverUrl.username = configuration.user
}
if (configuration.password !== '') {
serverUrl.password = configuration.password
}
if (serverUrl.port === '') {
serverUrl.port = '5665' // Default icinga2 access port
}
serverUrl.pathname = '/v1/actions/process-check-result'
this._url = serverUrl.href
this._filter =
configuration.filter !== undefined ? configuration.filter : ''
this._acceptUnauthorized = configuration.acceptUnauthorized
}
load() {
this._unset = this._xo.defineProperty(
'sendIcinga2Status',
this._sendIcinga2Status,
this
)
}
unload() {
this._unset()
}
test() {
return this._sendIcinga2Status({
message:
'The server-icinga2 plugin for Xen Orchestra server seems to be working fine, nicely done :)',
status: 'OK',
})
}
// ---------------------------------------------------------------------------
_sendIcinga2Status({ message, status }) {
const icinga2Status = STATUS_MAP[status]
assert(icinga2Status !== undefined, `Invalid icinga2 status: ${status}`)
return this._xo
.httpRequest(this._url, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
},
rejectUnauthorized: !this._acceptUnauthorized,
body: JSON.stringify({
type: 'Service',
filter: this._filter,
plugin_output: message,
exit_status: icinga2Status,
}),
})
.readAll()
}
}
// =============================================================================
export default opts => new XoServerIcinga2(opts)

View File

@@ -39,7 +39,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -33,14 +33,14 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"slack-node": "^0.1.8"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -40,7 +40,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -36,20 +36,20 @@
},
"dependencies": {
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/log": "^0.2.0",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -5,6 +5,7 @@ import humanFormat from 'human-format'
import { createSchedule } from '@xen-orchestra/cron'
import { minify } from 'html-minifier'
import {
assign,
concat,
differenceBy,
filter,
@@ -417,7 +418,7 @@ function computeGlobalVmsStats({ haltedVms, vmsStats, xo }) {
}))
)
return Object.assign(
return assign(
computeMeans(vmsStats, [
'cpu',
'ram',
@@ -445,7 +446,7 @@ function computeGlobalHostsStats({ haltedHosts, hostsStats, xo }) {
}))
)
return Object.assign(
return assign(
computeMeans(hostsStats, [
'cpu',
'ram',

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.51.1",
"version": "5.50.1",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -30,12 +30,12 @@
"bin": "bin"
},
"engines": {
"node": ">=8"
"node": ">=6"
},
"dependencies": {
"@iarna/toml": "^2.2.1",
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.10.1",
@@ -58,15 +58,16 @@
"debug": "^4.0.1",
"decorator-synchronized": "^0.5.0",
"deptree": "^1.0.0",
"escape-string-regexp": "^1.0.5",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"execa": "^2.0.5",
"execa": "^1.0.0",
"express": "^4.16.2",
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.7.1",
"helmet": "^3.9.0",
@@ -90,7 +91,7 @@
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"make-error": "^1",
"micromatch": "^4.0.2",
"micromatch": "^3.1.4",
"minimist": "^1.2.0",
"moment-timezone": "^0.5.14",
"ms": "^2.1.1",
@@ -102,7 +103,7 @@
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^24.0.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pump": "^3.0.0",
@@ -122,7 +123,7 @@
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.7.0",
"ws": "^7.1.2",
"ws": "^6.0.0",
"xen-api": "^0.27.2",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
@@ -147,7 +148,7 @@
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
},

View File

@@ -8,7 +8,7 @@ import { safeDateFormat } from '../utils'
export function createJob({ schedules, ...job }) {
job.userId = this.user.id
return this.createBackupNgJob(job, schedules).then(({ id }) => id)
return this.createBackupNgJob(job, schedules)
}
createJob.permission = 'admin'

View File

@@ -777,7 +777,7 @@ export async function probeIscsiExists({
)
const srs = []
forEach(ensureArray(xml.SRlist.SR), sr => {
forEach(ensureArray(xml['SRlist'].SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({ uuid: sr.UUID.trim() })
})
@@ -845,7 +845,7 @@ export async function probeNfsExists({ host, server, serverPath }) {
const srs = []
forEach(ensureArray(xml.SRlist.SR), sr => {
forEach(ensureArray(xml['SRlist'].SR), sr => {
// get the UUID of SR connected to this LUN
srs.push({ uuid: sr.UUID.trim() })
})

View File

@@ -1,7 +1,7 @@
// FIXME: rename to disk.*
import { invalidParameters } from 'xo-common/api-errors'
import { reduce } from 'lodash'
import { isArray, reduce } from 'lodash'
import { parseSize } from '../utils'
@@ -85,7 +85,7 @@ export async function set(params) {
continue
}
for (const field of Array.isArray(fields) ? fields : [fields]) {
for (const field of isArray(fields) ? fields : [fields]) {
await xapi.call(`VDI.set_${field}`, ref, `${params[param]}`)
}
}

View File

@@ -85,7 +85,7 @@ async function rateLimitedRetry(action, shouldRetry, retryCount = 20) {
function createVolumeInfoTypes() {
function parseHeal(parsed) {
const bricks = []
parsed.healInfo.bricks.brick.forEach(brick => {
parsed['healInfo']['bricks']['brick'].forEach(brick => {
bricks.push(brick)
if (brick.file) {
brick.file = ensureArray(brick.file)
@@ -96,21 +96,21 @@ function createVolumeInfoTypes() {
function parseStatus(parsed) {
const brickDictByUuid = {}
const volume = parsed.volStatus.volumes.volume
volume.node.forEach(node => {
const volume = parsed['volStatus']['volumes']['volume']
volume['node'].forEach(node => {
brickDictByUuid[node.peerid] = brickDictByUuid[node.peerid] || []
brickDictByUuid[node.peerid].push(node)
})
return {
commandStatus: true,
result: { nodes: brickDictByUuid, tasks: volume.tasks },
result: { nodes: brickDictByUuid, tasks: volume['tasks'] },
}
}
async function parseInfo(parsed) {
const volume = parsed.volInfo.volumes.volume
volume.bricks = volume.bricks.brick
volume.options = volume.options.option
const volume = parsed['volInfo']['volumes']['volume']
volume['bricks'] = volume['bricks']['brick']
volume['options'] = volume['options']['option']
return { commandStatus: true, result: volume }
}
@@ -118,23 +118,23 @@ function createVolumeInfoTypes() {
return async function(sr) {
const glusterEndpoint = this::_getGlusterEndpoint(sr)
const cmdShouldRetry = result =>
!result.commandStatus &&
((result.parsed && result.parsed.cliOutput.opErrno === '30802') ||
!result['commandStatus'] &&
((result.parsed && result.parsed['cliOutput']['opErrno'] === '30802') ||
result.stderr.match(/Another transaction is in progress/))
const runCmd = async () =>
glusterCmd(glusterEndpoint, 'volume ' + command, true)
const commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry, 30)
return commandResult.commandStatus
? this::handler(commandResult.parsed.cliOutput, sr)
return commandResult['commandStatus']
? this::handler(commandResult.parsed['cliOutput'], sr)
: commandResult
}
}
async function profileType(sr) {
async function parseProfile(parsed) {
const volume = parsed.volProfile
volume.bricks = ensureArray(volume.brick)
delete volume.brick
const volume = parsed['volProfile']
volume['bricks'] = ensureArray(volume['brick'])
delete volume['brick']
return { commandStatus: true, result: volume }
}
@@ -143,9 +143,9 @@ function createVolumeInfoTypes() {
async function profileTopType(sr) {
async function parseTop(parsed) {
const volume = parsed.volTop
volume.bricks = ensureArray(volume.brick)
delete volume.brick
const volume = parsed['volTop']
volume['bricks'] = ensureArray(volume['brick'])
delete volume['brick']
return { commandStatus: true, result: volume }
}
@@ -326,7 +326,7 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
}
messageArray.push(`${key}: ${result[key]}`)
}
messageArray.push('command: ' + result.command.join(' '))
messageArray.push('command: ' + result['command'].join(' '))
messageKeys.splice(messageKeys.indexOf('command'), 1)
for (const key of messageKeys) {
messageArray.push(`${key}: ${JSON.stringify(result[key])}`)
@@ -343,7 +343,7 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
})
break
} catch (exception) {
if (exception.code !== 'HOST_OFFLINE') {
if (exception['code'] !== 'HOST_OFFLINE') {
throw exception
}
}
@@ -370,17 +370,19 @@ async function remoteSsh(glusterEndpoint, cmd, ignoreError = false) {
}
function findErrorMessage(commandResut) {
if (commandResut.exit === 0 && commandResut.parsed) {
const cliOut = commandResut.parsed.cliOutput
if (cliOut.opErrstr && cliOut.opErrstr.length) {
return cliOut.opErrstr
if (commandResut['exit'] === 0 && commandResut.parsed) {
const cliOut = commandResut.parsed['cliOutput']
if (cliOut['opErrstr'] && cliOut['opErrstr'].length) {
return cliOut['opErrstr']
}
// "peer probe" returns it's "already in peer" error in cliOutput/output
if (cliOut.output && cliOut.output.length) {
return cliOut.output
if (cliOut['output'] && cliOut['output'].length) {
return cliOut['output']
}
}
return commandResut.stderr.length ? commandResut.stderr : commandResut.stdout
return commandResut['stderr'].length
? commandResut['stderr']
: commandResut['stdout']
}
async function glusterCmd(glusterEndpoint, cmd, ignoreError = false) {
@@ -390,15 +392,15 @@ async function glusterCmd(glusterEndpoint, cmd, ignoreError = false) {
true
)
try {
result.parsed = parseXml(result.stdout)
result.parsed = parseXml(result['stdout'])
} catch (e) {
// pass, we never know if a message can be parsed or not, so we just try
}
if (result.exit === 0) {
const cliOut = result.parsed.cliOutput
if (result['exit'] === 0) {
const cliOut = result.parsed['cliOutput']
// we have found cases where opErrno is !=0 and opRet was 0, albeit the operation was an error.
result.commandStatus =
cliOut.opRet.trim() === '0' && cliOut.opErrno.trim() === '0'
cliOut['opRet'].trim() === '0' && cliOut['opErrno'].trim() === '0'
result.error = findErrorMessage(result)
} else {
result.commandStatus = false
@@ -791,7 +793,7 @@ export const createSR = defer(async function(
host: param.host.$id,
vm: { id: param.vm.$id, ip: param.address },
underlyingSr: param.underlyingSr.$id,
arbiter: !!param.arbiter,
arbiter: !!param['arbiter'],
}))
await xapi.xo.setData(xosanSrRef, 'xosan_config', {
version: 'beta2',
@@ -1298,7 +1300,7 @@ export const addBricks = defer(async function(
underlyingSr: newSr,
})
}
const arbiterNode = data.nodes.find(n => n.arbiter)
const arbiterNode = data.nodes.find(n => n['arbiter'])
if (arbiterNode) {
await glusterCmd(
glusterEndpoint,

View File

@@ -1,7 +1,7 @@
import Model from './model'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { isObject, map } from './utils'
import { isArray, isObject, map } from './utils'
// ===================================================================
@@ -30,7 +30,7 @@ export default class Collection extends EventEmitter {
}
async add(models, opts) {
const array = Array.isArray(models)
const array = isArray(models)
if (!array) {
models = [models]
}
@@ -66,7 +66,7 @@ export default class Collection extends EventEmitter {
}
async remove(ids) {
if (!Array.isArray(ids)) {
if (!isArray(ids)) {
ids = [ids]
}
@@ -77,8 +77,8 @@ export default class Collection extends EventEmitter {
}
async update(models) {
const array = Array.isArray(models)
if (!array) {
const array = isArray(models)
if (!isArray(models)) {
models = [models]
}

View File

@@ -29,7 +29,13 @@ import { ensureDir, readdir, readFile } from 'fs-extra'
import parseDuration from './_parseDuration'
import Xo from './xo'
import { forEach, mapToArray, pFromCallback } from './utils'
import {
forEach,
isArray,
isFunction,
mapToArray,
pFromCallback,
} from './utils'
import bodyParser from 'body-parser'
import connectFlash from 'connect-flash'
@@ -275,16 +281,15 @@ async function registerPlugin(pluginPath, pluginName) {
// The default export can be either a factory or directly a plugin
// instance.
const instance =
typeof factory === 'function'
? factory({
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
: factory
const instance = isFunction(factory)
? factory({
xo: this,
getDataDir: () => {
const dir = `${this._config.datadir}/${pluginName}`
return ensureDir(dir).then(() => dir)
},
})
: factory
await this.registerPlugin(
pluginName,
@@ -463,7 +468,7 @@ const setUpProxies = (express, opts, xo) => {
const setUpStaticFiles = (express, opts) => {
forEach(opts, (paths, url) => {
if (!Array.isArray(paths)) {
if (!isArray(paths)) {
paths = [paths]
}

View File

@@ -6,7 +6,6 @@ import ndjson from 'ndjson'
import parseArgs from 'minimist'
import sublevel from 'level-sublevel'
import util from 'util'
import { join as joinPath } from 'path'
import { repair as repairDb } from 'level'
import { forEach } from './utils'
@@ -175,7 +174,6 @@ export default async function main() {
}
const config = await appConf.load('xo-server', {
appDir: joinPath(__dirname, '..'),
ignoreUnknownFormats: true,
})

View File

@@ -8,21 +8,19 @@ const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => async (fields, ...args) => {
const { stdout } = await execa(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
return splitLines(stdout).map(
Array.isArray(fields) ? parse : line => parse(line)[fields]
)
return splitLines(
await execa.stdout(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
export const lvs = makeFunction('lvs')

View File

@@ -1,3 +1,5 @@
import assign from 'lodash/assign'
const _combine = (vectors, n, cb) => {
if (!n) {
return
@@ -33,7 +35,7 @@ export const combine = vectors => cb => _combine(vectors, vectors.length, cb)
// Merge the properties of an objects set in one object.
//
// Ex: mergeObjects([ { a: 1 }, { b: 2 } ]) => { a: 1, b: 2 }
export const mergeObjects = objects => Object.assign({}, ...objects)
export const mergeObjects = objects => assign({}, ...objects)
// Compute a cross product between vectors.
//

View File

@@ -1,6 +1,6 @@
import { EventEmitter } from 'events'
import { forEach, isEmpty } from './utils'
import { forEach, isEmpty, isString } from './utils'
// ===================================================================
@@ -30,7 +30,7 @@ export default class Model extends EventEmitter {
set(properties, value) {
// This method can also be used with two arguments to set a single
// property.
if (typeof properties === 'string') {
if (isString(properties)) {
properties = { [properties]: value }
}

View File

@@ -1,6 +1,5 @@
import appConf from 'app-conf'
import pw from 'pw'
import { join as joinPath } from 'path'
import Xo from './xo'
import { generateToken } from './utils'
@@ -27,7 +26,6 @@ xo-server-recover-account <user name or email>
const xo = new Xo(
await appConf.load('xo-server', {
appDir: joinPath(__dirname, '..'),
ignoreUnknownFormats: true,
})
)

View File

@@ -3,6 +3,7 @@ import forEach from 'lodash/forEach'
import has from 'lodash/has'
import highland from 'highland'
import humanFormat from 'human-format'
import isString from 'lodash/isString'
import keys from 'lodash/keys'
import multiKeyHashInt from 'multikey-hash'
import pick from 'lodash/pick'
@@ -207,7 +208,7 @@ export {
// -------------------------------------------------------------------
export function parseSize(size) {
if (typeof size !== 'string') {
if (!isString(size)) {
return size
}
@@ -255,9 +256,13 @@ export const safeDateParse = utcParse('%Y%m%dT%H%M%SZ')
//
// Exports them from here to avoid direct dependencies on lodash/
export { default as forEach } from 'lodash/forEach'
export { default as isArray } from 'lodash/isArray'
export { default as isBoolean } from 'lodash/isBoolean'
export { default as isEmpty } from 'lodash/isEmpty'
export { default as isFunction } from 'lodash/isFunction'
export { default as isInteger } from 'lodash/isInteger'
export { default as isObject } from 'lodash/isObject'
export { default as isString } from 'lodash/isString'
export { default as mapToArray } from 'lodash/map'
// -------------------------------------------------------------------
@@ -359,7 +364,7 @@ export const thunkToArray = thunk => {
// function foo (param = throwFn('param is required')()) {}
// ```
export const throwFn = error => () => {
throw typeof error === 'string' ? new Error(error) : error
throw isString(error) ? new Error(error) : error
}
// -------------------------------------------------------------------

View File

@@ -3,6 +3,7 @@ import ensureArray from './_ensureArray'
import {
extractProperty,
forEach,
isArray,
isEmpty,
mapFilter,
mapToArray,
@@ -26,7 +27,7 @@ function link(obj, prop, idField = '$id') {
return dynamicValue // Properly handles null and undefined.
}
if (Array.isArray(dynamicValue)) {
if (isArray(dynamicValue)) {
return mapToArray(dynamicValue, idField)
}

View File

@@ -42,6 +42,7 @@ import pRetry from '../_pRetry'
import {
camelToSnakeCase,
forEach,
isFunction,
map,
mapToArray,
pAll,
@@ -81,7 +82,7 @@ export const TAG_COPY_SRC = 'xo:copy_of'
// FIXME: remove this work around when fixed, https://phabricator.babeljs.io/T2877
// export * from './utils'
Object.assign(module.exports, require('./utils'))
require('lodash/assign')(module.exports, require('./utils'))
// VDI formats. (Raw is not available for delta vdi.)
export const VDI_FORMAT_VHD = 'vhd'
@@ -173,7 +174,7 @@ export default class Xapi extends XapiBase {
//
// TODO: implements a timeout.
_waitObject(predicate) {
if (typeof predicate === 'function') {
if (isFunction(predicate)) {
const { promise, resolve } = defer()
const unregister = this._registerGenericWatcher(obj => {
@@ -1575,7 +1576,7 @@ export default class Xapi extends XapiBase {
}
} else {
// Find the original template by name (*sigh*).
const templateNameLabel = vm.other_config.base_template_name
const templateNameLabel = vm.other_config['base_template_name']
const template =
templateNameLabel &&
find(

View File

@@ -1,3 +1,4 @@
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import deferrable from 'golike-defer'
import unzip from 'julien-f-unzip'
@@ -336,7 +337,7 @@ export default {
// INSTALL -------------------------------------------------------------------
async _xcpUpdate(hosts) {
_xcpUpdate(hosts) {
if (hosts === undefined) {
hosts = filter(this.objects.all, { $type: 'host' })
} else {
@@ -346,10 +347,7 @@ export default {
)
}
// XCP-ng hosts need to be updated one at a time starting with the pool master
// https://github.com/vatesfr/xen-orchestra/issues/4468
hosts = hosts.sort(({ $ref }) => ($ref === this.pool.master ? -1 : 1))
for (const host of hosts) {
return asyncMap(hosts, async host => {
const update = await this.call(
'host.call_plugin',
host.$ref,
@@ -366,7 +364,7 @@ export default {
String(Date.now() / 1000)
)
}
}
})
},
// Legacy XS patches: upload a patch on a pool before installing it

View File

@@ -9,7 +9,11 @@ import { satisfies as versionSatisfies } from 'semver'
import {
camelToSnakeCase,
forEach,
isArray,
isBoolean,
isFunction,
isInteger,
isString,
map,
mapFilter,
mapToArray,
@@ -41,10 +45,10 @@ export const prepareXapiParam = param => {
if (isInteger(param)) {
return asInteger(param)
}
if (typeof param === 'boolean') {
if (isBoolean(param)) {
return asBoolean(param)
}
if (Array.isArray(param)) {
if (isArray(param)) {
return map(param, prepareXapiParam)
}
if (isPlainObject(param)) {
@@ -131,14 +135,14 @@ export const makeEditObject = specs => {
return object => object[prop]
}
if (typeof get === 'string') {
if (isString(get)) {
return object => object[get]
}
return get
}
const normalizeSet = (set, name) => {
if (typeof set === 'function') {
if (isFunction(set)) {
return set
}
@@ -149,7 +153,7 @@ export const makeEditObject = specs => {
}
}
if (typeof set === 'string') {
if (isString(set)) {
const index = set.indexOf('.')
if (index === -1) {
const prop = camelToSnakeCase(set)
@@ -172,7 +176,7 @@ export const makeEditObject = specs => {
}
}
if (!Array.isArray(set)) {
if (!isArray(set)) {
throw new Error('must be an array, a function or a string')
}
@@ -208,7 +212,7 @@ export const makeEditObject = specs => {
}
forEach(spec.constraints, (constraint, constraintName) => {
if (typeof constraint !== 'function') {
if (!isFunction(constraint)) {
throw new Error('constraint must be a function')
}
@@ -230,15 +234,15 @@ export const makeEditObject = specs => {
return spec
}
forEach(specs, (spec, name) => {
typeof spec === 'string' || (specs[name] = normalizeSpec(spec, name))
isString(spec) || (specs[name] = normalizeSpec(spec, name))
})
// Resolves aliases and add camelCase and snake_case aliases.
forEach(specs, (spec, name) => {
if (typeof spec === 'string') {
if (isString(spec)) {
do {
spec = specs[spec]
} while (typeof spec === 'string')
} while (isString(spec))
specs[name] = spec
}

View File

@@ -2,7 +2,7 @@ import createLogger from '@xen-orchestra/log'
import kindOf from 'kindof'
import ms from 'ms'
import schemaInspector from 'schema-inspector'
import { forEach } from 'lodash'
import { forEach, isFunction } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { MethodNotFound } from 'json-rpc-peer'
@@ -183,7 +183,7 @@ export default class Api {
const addMethod = (method, name) => {
name = base + name
if (typeof method === 'function') {
if (isFunction(method)) {
removes.push(this.addApiMethod(name, method))
return
}

View File

@@ -53,7 +53,7 @@ import {
type Xapi,
TAG_COPY_SRC,
} from '../../xapi'
import { formatDateTime, getVmDisks } from '../../xapi/utils'
import { getVmDisks } from '../../xapi/utils'
import {
resolveRelativeFromFile,
safeDateFormat,
@@ -75,7 +75,6 @@ type Settings = {|
deleteFirst?: boolean,
copyRetention?: number,
exportRetention?: number,
offlineBackup?: boolean,
offlineSnapshot?: boolean,
reportWhen?: ReportWhen,
snapshotRetention?: number,
@@ -148,7 +147,6 @@ const defaultSettings: Settings = {
deleteFirst: false,
exportRetention: 0,
fullInterval: 0,
offlineBackup: false,
offlineSnapshot: false,
reportWhen: 'failure',
snapshotRetention: 0,
@@ -190,7 +188,7 @@ const getJobCompression = ({ compression: c }) =>
const listReplicatedVms = (
xapi: Xapi,
scheduleOrJobId: string,
srUuid?: string,
srId?: string,
vmUuid?: string
): Vm[] => {
const { all } = xapi.objects
@@ -205,7 +203,7 @@ const listReplicatedVms = (
'start' in object.blocked_operations &&
(oc['xo:backup:job'] === scheduleOrJobId ||
oc['xo:backup:schedule'] === scheduleOrJobId) &&
oc['xo:backup:sr'] === srUuid &&
oc['xo:backup:sr'] === srId &&
(oc['xo:backup:vm'] === vmUuid ||
// 2018-03-28, JFT: to catch VMs replicated before this fix
oc['xo:backup:vm'] === undefined)
@@ -481,21 +479,16 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
// Attributes on created VM snapshots:
//
// - `other_config`:
// - `xo:backup:datetime` = snapshot.snapshot_time (allow sorting replicated VMs)
// - `xo:backup:deltaChainLength` = n (number of delta copies/replicated since a full)
// - `xo:backup:exported` = 'true' (added at the end of the backup)
//
// Attributes on created VMs and created snapshots:
//
// - `other_config`:
// - `xo:backup:datetime`: format is UTC %Y%m%dT%H:%M:%SZ
// - from snapshots: snapshot.snapshot_time
// - with offline backup: formatDateTime(Date.now())
// - `xo:backup:job` = job.id
// - `xo:backup:schedule` = schedule.id
// - `xo:backup:vm` = vm.uuid
//
// Attributes of created VMs:
//
// - all snapshots attributes (see above)
// - `name_label`: `${original name} - ${job name} - (${safeDateFormat(backup timestamp)})`
// - tag:
// - copy in delta mode: `Continuous Replication`
@@ -1030,12 +1023,6 @@ export default class BackupNg {
throw new Error('copy, export and snapshot retentions cannot both be 0')
}
const isOfflineBackup =
mode === 'full' && getSetting(settings, 'offlineBackup', [vmUuid, ''])
if (isOfflineBackup && snapshotRetention > 0) {
throw new Error('offline backup is not compatible with rolling snapshot')
}
if (
!some(
vm.$VBDs,
@@ -1045,139 +1032,110 @@ export default class BackupNg {
throw new Error('no disks found')
}
let baseSnapshot, exported: Vm, exportDateTime
if (isOfflineBackup) {
exported = vm
exportDateTime = formatDateTime(Date.now())
if (vm.power_state === 'Running') {
await wrapTask(
{
logger,
message: 'shutdown VM',
parentId: taskId,
},
xapi.shutdownVm(vm)
)
$defer(() => xapi.startVm(vm))
}
} else {
const snapshots = vm.$snapshots
.filter(_ => _.other_config['xo:backup:job'] === jobId)
.sort(compareSnapshotTime)
const snapshots = vm.$snapshots
.filter(_ => _.other_config['xo:backup:job'] === jobId)
.sort(compareSnapshotTime)
const bypassVdiChainsCheck: boolean = getSetting(
settings,
'bypassVdiChainsCheck',
[vmUuid, '']
)
if (!bypassVdiChainsCheck) {
xapi._assertHealthyVdiChains(vm)
}
const offlineSnapshot: boolean = getSetting(settings, 'offlineSnapshot', [
vmUuid,
'',
])
const startAfterSnapshot = offlineSnapshot && vm.power_state === 'Running'
if (startAfterSnapshot) {
await wrapTask(
{
logger,
message: 'shutdown VM',
parentId: taskId,
},
xapi.shutdownVm(vm)
)
}
exported = (await wrapTask(
{
logger,
message: 'snapshot',
parentId: taskId,
result: _ => _.uuid,
},
xapi._snapshotVm(
$cancelToken,
vm,
`[XO Backup ${job.name}] ${vm.name_label}`
)
): any)
if (startAfterSnapshot) {
ignoreErrors.call(xapi.startVm(vm))
}
const bypassVdiChainsCheck: boolean = getSetting(
settings,
'bypassVdiChainsCheck',
[vmUuid, '']
)
if (!bypassVdiChainsCheck) {
xapi._assertHealthyVdiChains(vm)
}
const offlineSnapshot: boolean = getSetting(settings, 'offlineSnapshot', [
vmUuid,
'',
])
const startAfterSnapshot = offlineSnapshot && vm.power_state === 'Running'
if (startAfterSnapshot) {
await wrapTask(
{
logger,
message: 'add metadata to snapshot',
message: 'shutdown VM',
parentId: taskId,
},
exported.update_other_config({
'xo:backup:datetime': exported.snapshot_time,
'xo:backup:job': jobId,
'xo:backup:schedule': scheduleId,
'xo:backup:vm': vmUuid,
})
xapi.shutdownVm(vm)
)
}
exported = await xapi.barrier(exported.$ref)
if (mode === 'delta') {
baseSnapshot = findLast(
snapshots,
_ => 'xo:backup:exported' in _.other_config
)
// JFT 2018-10-02: support previous snapshots which did not have this
// entry, can be removed after 2018-12.
if (baseSnapshot === undefined) {
baseSnapshot = last(snapshots)
}
}
snapshots.push(exported)
// snapshots to delete due to the snapshot retention settings
const snapshotsToDelete = flatMap(
groupBy(snapshots, _ => _.other_config['xo:backup:schedule']),
(snapshots, scheduleId) =>
getOldEntries(
getSetting(settings, 'snapshotRetention', [scheduleId]),
snapshots
)
let snapshot: Vm = (await wrapTask(
{
logger,
message: 'snapshot',
parentId: taskId,
result: _ => _.uuid,
},
xapi._snapshotVm(
$cancelToken,
vm,
`[XO Backup ${job.name}] ${vm.name_label}`
)
): any)
// delete unused snapshots
await asyncMap(snapshotsToDelete, vm => {
// snapshot and baseSnapshot should not be deleted right now
if (vm !== exported && vm !== baseSnapshot) {
return xapi.deleteVm(vm)
}
if (startAfterSnapshot) {
ignoreErrors.call(xapi.startVm(vm))
}
await wrapTask(
{
logger,
message: 'add metadata to snapshot',
parentId: taskId,
},
snapshot.update_other_config({
'xo:backup:datetime': snapshot.snapshot_time,
'xo:backup:job': jobId,
'xo:backup:schedule': scheduleId,
'xo:backup:vm': vmUuid,
})
)
exported = ((await wrapTask(
{
logger,
message: 'waiting for uptodate snapshot record',
parentId: taskId,
},
xapi.barrier(exported.$ref)
): any): Vm)
snapshot = await xapi.barrier(snapshot.$ref)
if (mode === 'full' && snapshotsToDelete.includes(exported)) {
// TODO: do not create the snapshot if there are no snapshotRetention and
// the VM is not running
$defer.call(xapi, 'deleteVm', exported)
} else if (mode === 'delta') {
if (snapshotsToDelete.includes(exported)) {
$defer.onFailure.call(xapi, 'deleteVm', exported)
}
if (snapshotsToDelete.includes(baseSnapshot)) {
$defer.onSuccess.call(xapi, 'deleteVm', baseSnapshot)
}
let baseSnapshot
if (mode === 'delta') {
baseSnapshot = findLast(
snapshots,
_ => 'xo:backup:exported' in _.other_config
)
// JFT 2018-10-02: support previous snapshots which did not have this
// entry, can be removed after 2018-12.
if (baseSnapshot === undefined) {
baseSnapshot = last(snapshots)
}
}
snapshots.push(snapshot)
// snapshots to delete due to the snapshot retention settings
const snapshotsToDelete = flatMap(
groupBy(snapshots, _ => _.other_config['xo:backup:schedule']),
(snapshots, scheduleId) =>
getOldEntries(
getSetting(settings, 'snapshotRetention', [scheduleId]),
snapshots
)
)
// delete unused snapshots
await asyncMap(snapshotsToDelete, vm => {
// snapshot and baseSnapshot should not be deleted right now
if (vm !== snapshot && vm !== baseSnapshot) {
return xapi.deleteVm(vm)
}
})
snapshot = ((await wrapTask(
{
logger,
message: 'waiting for uptodate snapshot record',
parentId: taskId,
},
xapi.barrier(snapshot.$ref)
): any): Vm)
if (copyRetention === 0 && exportRetention === 0) {
return
@@ -1193,8 +1151,14 @@ export default class BackupNg {
const metadataFilename = `${vmDir}/${basename}.json`
if (mode === 'full') {
// TODO: do not create the snapshot if there are no snapshotRetention and
// the VM is not running
if (snapshotsToDelete.includes(snapshot)) {
$defer.call(xapi, 'deleteVm', snapshot)
}
let compress = getJobCompression(job)
const pool = exported.$pool
const pool = snapshot.$pool
if (
compress === 'zstd' &&
pool.restrictions.restrict_zstd_export !== 'false'
@@ -1211,10 +1175,10 @@ export default class BackupNg {
let xva: any = await wrapTask(
{
logger,
message: 'start VM export',
message: 'start snapshot export',
parentId: taskId,
},
xapi.exportVm($cancelToken, exported, {
xapi.exportVm($cancelToken, snapshot, {
compress,
})
)
@@ -1239,7 +1203,7 @@ export default class BackupNg {
timestamp: now,
version: '2.0.0',
vm,
vmSnapshot: exported.id !== vm.id ? exported : undefined,
vmSnapshot: snapshot,
xva: `./${dataBasename}`,
}
const dataFilename = `${vmDir}/${dataBasename}`
@@ -1323,7 +1287,7 @@ export default class BackupNg {
async (taskId, sr) => {
const fork = forkExport()
const { uuid: srUuid, xapi } = sr
const { $id: srId, xapi } = sr
// delete previous interrupted copies
ignoreErrors.call(
@@ -1335,7 +1299,7 @@ export default class BackupNg {
const oldVms = getOldEntries(
copyRetention - 1,
listReplicatedVms(xapi, scheduleId, srUuid, vmUuid)
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
)
const deleteOldBackups = () =>
@@ -1347,9 +1311,7 @@ export default class BackupNg {
},
this._deleteVms(xapi, oldVms)
)
const deleteFirst = getSetting(settings, 'deleteFirst', [
srUuid,
])
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
if (deleteFirst) {
await deleteOldBackups()
}
@@ -1379,15 +1341,7 @@ export default class BackupNg {
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
!isOfflineBackup
? vm.update_other_config('xo:backup:sr', srUuid)
: vm.update_other_config({
'xo:backup:datetime': exportDateTime,
'xo:backup:job': jobId,
'xo:backup:schedule': scheduleId,
'xo:backup:sr': srUuid,
'xo:backup:vm': exported.uuid,
}),
vm.update_other_config('xo:backup:sr', srId),
])
if (!deleteFirst) {
@@ -1400,6 +1354,13 @@ export default class BackupNg {
noop // errors are handled in logs
)
} else if (mode === 'delta') {
if (snapshotsToDelete.includes(snapshot)) {
$defer.onFailure.call(xapi, 'deleteVm', snapshot)
}
if (snapshotsToDelete.includes(baseSnapshot)) {
$defer.onSuccess.call(xapi, 'deleteVm', baseSnapshot)
}
let deltaChainLength = 0
let fullVdisRequired
await (async () => {
@@ -1437,11 +1398,11 @@ export default class BackupNg {
}
})
for (const { uuid: srUuid, xapi } of srs) {
for (const { $id: srId, xapi } of srs) {
const replicatedVm = listReplicatedVms(
xapi,
jobId,
srUuid,
srId,
vmUuid
).find(vm => vm.other_config[TAG_COPY_SRC] === baseSnapshot.uuid)
if (replicatedVm === undefined) {
@@ -1507,7 +1468,7 @@ export default class BackupNg {
message: 'start snapshot export',
parentId: taskId,
},
xapi.exportDeltaVm($cancelToken, exported, baseSnapshot, {
xapi.exportDeltaVm($cancelToken, snapshot, baseSnapshot, {
fullVdisRequired,
})
)
@@ -1529,7 +1490,7 @@ export default class BackupNg {
}/${basename}.vhd`
),
vm,
vmSnapshot: exported,
vmSnapshot: snapshot,
}
const jsonMetadata = JSON.stringify(metadata)
@@ -1695,7 +1656,7 @@ export default class BackupNg {
async (taskId, sr) => {
const fork = forkExport()
const { uuid: srUuid, xapi } = sr
const { $id: srId, xapi } = sr
// delete previous interrupted copies
ignoreErrors.call(
@@ -1707,7 +1668,7 @@ export default class BackupNg {
const oldVms = getOldEntries(
copyRetention - 1,
listReplicatedVms(xapi, scheduleId, srUuid, vmUuid)
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
)
const deleteOldBackups = () =>
@@ -1720,9 +1681,7 @@ export default class BackupNg {
this._deleteVms(xapi, oldVms)
)
const deleteFirst = getSetting(settings, 'deleteFirst', [
srUuid,
])
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
if (deleteFirst) {
await deleteOldBackups()
}
@@ -1739,7 +1698,7 @@ export default class BackupNg {
name_label: `${metadata.vm.name_label} - ${
job.name
} - (${safeDateFormat(metadata.timestamp)})`,
srId: sr.$id,
srId,
})
)
@@ -1750,7 +1709,7 @@ export default class BackupNg {
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
vm.update_other_config('xo:backup:sr', srUuid),
vm.update_other_config('xo:backup:sr', srId),
])
if (!deleteFirst) {
@@ -1765,7 +1724,7 @@ export default class BackupNg {
if (!isFull) {
ignoreErrors.call(
exported.update_other_config(
snapshot.update_other_config(
'xo:backup:deltaChainLength',
String(deltaChainLength)
)
@@ -1775,16 +1734,14 @@ export default class BackupNg {
throw new Error(`no exporter for backup mode ${mode}`)
}
if (!isOfflineBackup) {
await wrapTask(
{
logger,
message: 'set snapshot.other_config[xo:backup:exported]',
parentId: taskId,
},
exported.update_other_config('xo:backup:exported', 'true')
)
}
await wrapTask(
{
logger,
message: 'set snapshot.other_config[xo:backup:exported]',
parentId: taskId,
},
snapshot.update_other_config('xo:backup:exported', 'true')
)
}
async _deleteDeltaVmBackups(

View File

@@ -1,6 +1,7 @@
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import deferrable from 'golike-defer'
import escapeStringRegexp from 'escape-string-regexp'
import execa from 'execa'
import splitLines from 'split-lines'
import { CancelToken, fromEvent, ignoreErrors } from 'promise-toolbox'
@@ -9,15 +10,7 @@ import { createReadStream, readdir, stat } from 'fs'
import { satisfies as versionSatisfies } from 'semver'
import { utcFormat } from 'd3-time-format'
import { basename, dirname } from 'path'
import {
escapeRegExp,
filter,
find,
includes,
once,
range,
sortBy,
} from 'lodash'
import { filter, find, includes, once, range, sortBy, trim } from 'lodash'
import {
chainVhd,
createSyntheticStream as createVhdReadStream,
@@ -26,7 +19,6 @@ import {
import createSizeStream from '../size-stream'
import xapiObjectToXo from '../xapi-object-to-xo'
import { debounceWithKey } from '../_pDebounceWithKey'
import { lvs, pvs } from '../lvm'
import {
forEach,
@@ -44,7 +36,6 @@ import {
// ===================================================================
const DEBOUNCE_DELAY = 10e3
const DELTA_BACKUP_EXT = '.json'
const DELTA_BACKUP_EXT_LENGTH = DELTA_BACKUP_EXT.length
const TAG_SOURCE_VM = 'xo:source_vm'
@@ -148,20 +139,22 @@ const listPartitions = (() => {
})
return device =>
execa('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
device.path,
]).then(({ stdout }) =>
mapFilter(splitLines(stdout), line => {
const partition = parseLine(line)
const { type } = partition
if (type != null && !IGNORED[+type]) {
return partition
}
})
)
execa
.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
device.path,
])
.then(stdout =>
mapFilter(splitLines(stdout), line => {
const partition = parseLine(line)
const { type } = partition
if (type != null && !IGNORED[+type]) {
return partition
}
})
)
})()
// handle LVM logical volumes automatically
@@ -278,8 +271,8 @@ const mountLvmPv = (device, partition) => {
}
args.push('--show', '-f', device.path)
return execa('losetup', args).then(({ stdout }) => {
const path = stdout.trim()
return execa.stdout('losetup', args).then(stdout => {
const path = trim(stdout)
return {
path,
unmount: once(() =>
@@ -301,9 +294,6 @@ export default class {
this._xo = xo
}
@debounceWithKey.decorate(DEBOUNCE_DELAY, function keyFn(remoteId) {
return [this, remoteId]
})
async listRemoteBackups(remoteId) {
const handler = await this._xo.getRemoteHandler(remoteId)
@@ -330,9 +320,6 @@ export default class {
return backups
}
@debounceWithKey.decorate(DEBOUNCE_DELAY, function keyFn(remoteId) {
return [this, remoteId]
})
async listVmBackups(remoteId) {
const handler = await this._xo.getRemoteHandler(remoteId)
@@ -875,7 +862,7 @@ export default class {
const files = await handler.list('.')
const reg = new RegExp(
'^[^_]+_' + escapeRegExp(`${tag}_${vm.name_label}.xva`)
'^[^_]+_' + escapeStringRegexp(`${tag}_${vm.name_label}.xva`)
)
const backups = sortBy(filter(files, fileName => reg.test(fileName)))
@@ -900,7 +887,9 @@ export default class {
xapi._assertHealthyVdiChains(vm)
const reg = new RegExp('^rollingSnapshot_[^_]+_' + escapeRegExp(tag) + '_')
const reg = new RegExp(
'^rollingSnapshot_[^_]+_' + escapeStringRegexp(tag) + '_'
)
const snapshots = sortBy(
filter(vm.$snapshots, snapshot => reg.test(snapshot.name_label)),
'name_label'
@@ -937,7 +926,9 @@ export default class {
const transferStart = Date.now()
tag = 'DR_' + tag
const reg = new RegExp(
'^' + escapeRegExp(`${vm.name_label}_${tag}_`) + '[0-9]{8}T[0-9]{6}Z$'
'^' +
escapeStringRegexp(`${vm.name_label}_${tag}_`) +
'[0-9]{8}T[0-9]{6}Z$'
)
const targetXapi = this._xo.getXapi(sr)

View File

@@ -87,7 +87,7 @@ async function mountLvmPhysicalVolume(devicePath, partition) {
args.push('-o', partition.start * 512)
}
args.push('--show', '-f', devicePath)
const path = (await execa('losetup', args)).stdout.trim()
const path = (await execa.stdout('losetup', args)).trim()
await execa('pvscan', ['--cache', path])
return {
@@ -251,7 +251,7 @@ export default class BackupNgFileRestore {
}
async _listPartitions(devicePath, inspectLvmPv = true) {
const { stdout } = await execa('partx', [
const stdout = await execa.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',

View File

@@ -1,7 +1,7 @@
import asyncMap from '@xen-orchestra/async-map'
import { createPredicate } from 'value-matcher'
import { timeout } from 'promise-toolbox'
import { filter, isEmpty, map, mapValues } from 'lodash'
import { assign, filter, isEmpty, map, mapValues } from 'lodash'
import { crossProduct } from '../../math'
import { serializeError, thunkToArray } from '../../utils'
@@ -82,11 +82,7 @@ export default async function executeJobCall({
params,
start: Date.now(),
})
let promise = app.callApiMethod(
session,
job.method,
Object.assign({}, params)
)
let promise = app.callApiMethod(session, job.method, assign({}, params))
if (job.timeout) {
promise = promise::timeout(job.timeout)
}

View File

@@ -4,7 +4,7 @@ import { invalidParameters, noSuchObject } from 'xo-common/api-errors'
import * as sensitiveValues from '../sensitive-values'
import { PluginsMetadata } from '../models/plugin-metadata'
import { mapToArray } from '../utils'
import { isFunction, mapToArray } from '../utils'
// ===================================================================
@@ -65,9 +65,9 @@ export default class {
id,
instance,
name,
testable: typeof instance.test === 'function',
testable: isFunction(instance.test),
testSchema,
unloadable: typeof instance.unload === 'function',
unloadable: isFunction(instance.unload),
version,
})

View File

@@ -1,6 +1,7 @@
import asyncMap from '@xen-orchestra/async-map'
import synchronized from 'decorator-synchronized'
import {
assign,
every,
forEach,
isObject,
@@ -122,7 +123,7 @@ export default class {
}
async computeVmResourcesUsage(vm) {
return Object.assign(
return assign(
computeVmResourcesUsage(this._xo.getXapi(vm).getObject(vm._xapiId)),
await this._xo.computeVmIpPoolsUsage(vm)
)

View File

@@ -77,10 +77,7 @@ export default class Scheduling {
'schedules',
() => db.get(),
schedules =>
asyncMap(schedules, async schedule => {
await db.update(normalize(schedule))
this._start(schedule.id)
}),
asyncMap(schedules, schedule => db.update(normalize(schedule))),
['jobs']
)

View File

@@ -2,7 +2,7 @@ import levelup from 'level-party'
import sublevel from 'level-sublevel'
import { ensureDir } from 'fs-extra'
import { forEach, promisify } from '../utils'
import { forEach, isFunction, promisify } from '../utils'
// ===================================================================
@@ -32,7 +32,7 @@ const levelHas = db => {
const levelPromise = db => {
const dbP = {}
forEach(db, (value, name) => {
if (typeof value !== 'function') {
if (!isFunction(value)) {
return
}

View File

@@ -10,7 +10,13 @@ import parseDuration from '../_parseDuration'
import Xapi from '../xapi'
import xapiObjectToXo from '../xapi-object-to-xo'
import XapiStats from '../xapi-stats'
import { camelToSnakeCase, forEach, isEmpty, popProperty } from '../utils'
import {
camelToSnakeCase,
forEach,
isEmpty,
isString,
popProperty,
} from '../utils'
import { Servers } from '../models/server'
// ===================================================================
@@ -455,7 +461,7 @@ export default class {
// Returns the XAPI connection associated to an object.
getXapi(object, type) {
if (typeof object === 'string') {
if (isString(object)) {
object = this._xo.getObject(object, type)
}

View File

@@ -9,6 +9,8 @@ import {
forEach,
includes,
isEmpty,
isFunction,
isString,
iteratee,
map as mapToArray,
stubTrue,
@@ -71,8 +73,7 @@ export default class Xo extends EventEmitter {
if (
type != null &&
((typeof type === 'string' && type !== obj.type) ||
!includes(type, obj.type)) // Array
((isString(type) && type !== obj.type) || !includes(type, obj.type)) // Array
) {
throw noSuchObject(key, type)
}
@@ -209,7 +210,7 @@ export default class Xo extends EventEmitter {
}
// For security, prevent from accessing `this`.
if (typeof value === 'function') {
if (isFunction(value)) {
value = (value =>
function() {
return value.apply(thisArg, arguments)

View File

@@ -27,7 +27,7 @@
"child-process-promise": "^2.0.3",
"core-js": "^3.0.0",
"pipette": "^0.9.3",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"tmp": "^0.1.0",
"vhd-lib": "^0.7.0"
},
@@ -36,7 +36,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"execa": "^2.0.2",
"fs-extra": "^8.0.1",

View File

@@ -43,6 +43,6 @@ test('VMDKDirectParser reads OK', async () => {
}
expect(harvested.length).toEqual(2)
expect(harvested[0].offsetBytes).toEqual(0)
expect(harvested[0].data.length).toEqual(header.grainSizeSectors * 512)
expect(harvested[1].offsetBytes).toEqual(header.grainSizeSectors * 512)
expect(harvested[0].data.length).toEqual(header['grainSizeSectors'] * 512)
expect(harvested[1].offsetBytes).toEqual(header['grainSizeSectors'] * 512)
})

Some files were not shown because too many files have changed in this diff Show More