Compare commits

..

2 Commits

Author SHA1 Message Date
BARHTAOUI
7d72165997 chore(CHANGELOG): update next 2019-10-10 15:56:19 +02:00
BARHTAOUI
58bfde62bd feat(xo-web): 5.50.3 2019-10-10 15:53:52 +02:00
288 changed files with 7039 additions and 9212 deletions

View File

@@ -21,7 +21,7 @@ module.exports = {
overrides: [
{
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
rules: {
'no-console': 'off',
},

View File

@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -8,8 +8,5 @@
"directory": "@xen-orchestra/babel-config",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": ">=6"
}
}

View File

@@ -1,32 +0,0 @@
// Builds a CLI dispatcher from a map of command definitions.
//
// The returned async function parses the raw argv array, selects the matching
// command and delegates to its `main(args, prefix)`. When the command is
// unknown (or `--help`/`-h` was passed, or no argument was given) it prints a
// usage summary instead.
const getopts = require('getopts')
const { version } = require('./package.json')

module.exports = commands =>
  async function(args, prefix) {
    const opts = getopts(args, {
      alias: {
        help: 'h',
      },
      boolean: ['help'],
      // stop parsing at the first non-option so that flags intended for the
      // sub-command are left untouched for the sub-command itself
      stopEarly: true,
    })

    // `help` is not expected to be a real entry in `commands`: resolving it
    // below yields `undefined`, which routes into the usage-printing branch
    const commandName = opts.help || args.length === 0 ? 'help' : args[0]
    const command = commands[commandName]
    if (command === undefined) {
      process.stdout.write(`Usage:
${Object.keys(commands)
  .filter(command => command !== 'help')
  .map(command => ` ${prefix} ${command} ${commands[command].usage || ''}`)
  .join('\n\n')}
xo-backups v${version}
`)
      // asking for help is a success, an unknown command is an error
      process.exitCode = commandName === 'help' ? 0 : 1
      return
    }
    return command.main(args.slice(1), prefix + ' ' + commandName)
  }

View File

@@ -1,393 +0,0 @@
#!/usr/bin/env node

// Whether destructive actions (delete/rename) are actually performed.
// Assigned when options are parsed by the main function; falsy means dry run.
let force

// -----------------------------------------------------------------------------

const assert = require('assert')
const getopts = require('getopts')
const lockfile = require('proper-lockfile')
const { default: Vhd } = require('vhd-lib')
const { curryRight, flatten } = require('lodash')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { pipe, promisifyAll } = require('promise-toolbox')

// promise-returning wrappers around the callback-based `fs` API
const fs = promisifyAll(require('fs'))

// remote handler backed by the local file system (`file://`)
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
// Applies `fn` to every item of `iterable` and waits for all of the resulting
// promises; thanks to `curryRight` it can also be partially applied as
// `asyncMap(fn)` to obtain a reusable `iterable => Promise` function.
const asyncMap = curryRight((iterable, fn) => {
  const promises = Array.isArray(iterable)
    ? iterable.map(fn)
    : Array.from(iterable, fn)
  return Promise.all(promises)
})
// Point-free helper: `filter(predicate)` returns a function which applies
// `Array#filter` with that predicate to the collection it receives.
const filter = (...args) => collection => collection.filter(...args)
// Detects the gzip magic number (0x1f 0x8b) at the beginning of an open file.
//
// https://tools.ietf.org/html/rfc1952.html#page-5
const isGzipFile = async fd => {
  const header = Buffer.allocUnsafe(2)
  const bytesRead = await fs.read(fd, header, 0, header.length, 0)
  assert.strictEqual(bytesRead, header.length)
  return header[0] === 31 && header[1] === 139
}
// TODO: better check?
//
// our heuristic is not good enough, there have been some false positives
// (detected as invalid by us but valid by `tar` and imported with success),
// though they may have been compressed files:
// - these files were normal but the check is incorrect
// - these files were invalid but without data loss
// - these files were invalid but with silent data loss
//
// maybe reading the end of the file looking for a file named
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295

// Checks whether an open file looks like a complete tar archive: its size
// must be a multiple of the 512 B record size and larger than the trailing
// end-of-archive marker, and its last 1024 B must be all zeros (the two
// zero-filled records that terminate a tar stream).
const isValidTar = async (size, fd) => {
  if (size <= 1024 || size % 512 !== 0) {
    return false
  }
  const buf = Buffer.allocUnsafe(1024)
  assert.strictEqual(
    await fs.read(fd, buf, 0, buf.length, size - buf.length),
    buf.length
  )
  return buf.every(_ => _ === 0)
}
// TODO: find a heuristic for compressed files

// Best-effort validity check for an XVA export file: gzip files are accepted
// as-is (no cheap way to validate them), uncompressed files are checked with
// `isValidTar`. On any unexpected error the file is reported as valid so the
// caller never acts destructively because of a bug in this check.
const isValidXva = async path => {
  try {
    const fd = await fs.open(path, 'r')
    try {
      const { size } = await fs.fstat(fd)
      if (size < 20) {
        // neither a valid gzip nor tar
        return false
      }
      return (await isGzipFile(fd))
        ? true // gzip files cannot be validated at this time
        : await isValidTar(size, fd)
    } finally {
      // fire-and-forget close: a failure to close must not mask the result
      fs.close(fd).catch(noop)
    }
  } catch (error) {
    // never throw, log and report as valid to avoid side effects
    console.error('isValidXva', path, error)
    return true
  }
}
// do-nothing function, used to swallow expected rejections
const noop = Function.prototype

// Lists the entries of `path`, each one prefixed with `path` itself so the
// result contains directly usable paths. A missing directory is treated as
// empty instead of being an error.
const readDir = async path => {
  let entries
  try {
    entries = await fs.readdir(path)
  } catch (error) {
    // a missing dir is by definition empty
    if (error != null && error.code === 'ENOENT') {
      return []
    }
    throw error
  }
  return entries.map(entry => `${path}/${entry}`)
}
// -----------------------------------------------------------------------------

// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will be deleted
async function mergeVhdChain(chain) {
  assert(chain.length >= 2)
  const child = chain[0]
  const parent = chain[chain.length - 1]
  const children = chain.slice(0, -1).reverse()

  // report what would be (or is being) done
  console.warn('Unused parents of VHD', child)
  chain
    .slice(1)
    .reverse()
    .forEach(parent => {
      console.warn(' ', parent)
    })
  force && console.warn(' merging…')
  console.warn('')
  if (force) {
    // `mergeVhd` does not work with a stream, either
    // - make it accept a stream
    // - or create synthetic VHD which is not a stream
    //
    // NOTE(review): because of this early return, the rename/unlink below
    // never runs in force mode — presumably intentional until the merge is
    // implemented; confirm before relying on it
    return console.warn('TODO: implement merge')
    // await mergeVhd(
    //   handler,
    //   parent,
    //   handler,
    //   children.length === 1
    //     ? child
    //     : await createSyntheticStream(handler, children)
    // )
  }

  await Promise.all([
    force && fs.rename(parent, child),
    asyncMap(children.slice(0, -1), child => {
      console.warn('Unused VHD', child)
      force && console.warn(' deleting…')
      console.warn('')
      return force && handler.unlink(child)
    }),
  ])
}
// Lists all `.vhd` files located two directory levels below `<vmDir>/vdis`
// (i.e. `<vmDir>/vdis/*/*/*.vhd` — presumably one level per job and per VDI;
// confirm against the backup directory layout).
const listVhds = pipe([
  vmDir => vmDir + '/vdis',
  readDir,
  asyncMap(readDir),
  flatten,
  asyncMap(readDir),
  flatten,
  filter(_ => _.endsWith('.vhd')),
])
// Checks (and, when `force` is set, repairs) the backup directory of a single
// VM:
// 1. drops VHDs whose header/footer cannot be read;
// 2. drops VHDs whose parent chain is broken;
// 3. reports XVAs which do not look valid (report only, never deleted here);
// 4. drops backup metadata (JSON) referencing missing XVAs/VHDs;
// 5. deletes (or merges) VHDs not referenced by any metadata, and deletes
//    unreferenced XVAs.
//
// Without `force`, every destructive step degrades to a warning.
async function handleVm(vmDir) {
  const vhds = new Set()
  const vhdParents = { __proto__: null } // child path → parent path
  const vhdChildren = { __proto__: null } // parent path → child path

  // remove broken VHDs
  await asyncMap(await listVhds(vmDir), async path => {
    try {
      const vhd = new Vhd(handler, path)
      await vhd.readHeaderAndFooter()
      vhds.add(path)
      if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
        const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
        vhdParents[path] = parent
        if (parent in vhdChildren) {
          const error = new Error(
            'this script does not support multiple VHD children'
          )
          error.parent = parent
          error.child1 = vhdChildren[parent]
          error.child2 = path
          throw error // should we throw?
        }
        vhdChildren[parent] = path
      }
    } catch (error) {
      console.warn('Error while checking VHD', path)
      console.warn(' ', error)
      // only assertion errors (corrupted header/footer) justify a deletion,
      // other errors might be transient (I/O…)
      if (error != null && error.code === 'ERR_ASSERTION') {
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(path))
      }
    }
  })

  // remove VHDs with missing ancestors
  {
    const deletions = []

    // removes the VHD from `vhds` (and schedules its deletion) if any of its
    // ancestors is missing
    const deleteIfOrphan = vhd => {
      const parent = vhdParents[vhd]
      if (parent === undefined) {
        return
      }

      // no longer needs to be checked
      delete vhdParents[vhd]

      // check ancestors first so a whole broken chain is removed bottom-up
      deleteIfOrphan(parent)

      if (!vhds.has(parent)) {
        vhds.delete(vhd)

        console.warn('Error while checking VHD', vhd)
        console.warn(' missing parent', parent)
        force && console.warn(' deleting…')
        console.warn('')
        force && deletions.push(handler.unlink(vhd))
      }
    }

    // > A property that is deleted before it has been visited will not be
    // > visited later.
    // >
    // > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
    for (const child in vhdParents) {
      deleteIfOrphan(child)
    }

    await Promise.all(deletions)
  }

  const [jsons, xvas] = await readDir(vmDir).then(entries => [
    entries.filter(_ => _.endsWith('.json')),
    new Set(entries.filter(_ => _.endsWith('.xva'))),
  ])

  await asyncMap(xvas, async path => {
    // check is not good enough to delete the file, the best we can do is report
    // it
    if (!(await isValidXva(path))) {
      console.warn('Potential broken XVA', path)
      console.warn('')
    }
  })

  const unusedVhds = new Set(vhds)
  const unusedXvas = new Set(xvas)

  // compile the list of unused XVAs and VHDs, and remove backup metadata which
  // reference a missing XVA/VHD
  await asyncMap(jsons, async json => {
    const metadata = JSON.parse(await fs.readFile(json))
    const { mode } = metadata
    if (mode === 'full') {
      const linkedXva = resolve(vmDir, metadata.xva)

      if (xvas.has(linkedXva)) {
        unusedXvas.delete(linkedXva)
      } else {
        console.warn('Error while checking backup', json)
        console.warn(' missing file', linkedXva)
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    } else if (mode === 'delta') {
      const linkedVhds = (() => {
        const { vhds } = metadata
        return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
      })()

      // FIXME: find better approach by keeping as much of the backup as
      // possible (existing disks) even if one disk is missing
      if (linkedVhds.every(_ => vhds.has(_))) {
        linkedVhds.forEach(_ => unusedVhds.delete(_))
      } else {
        console.warn('Error while checking backup', json)
        const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
        console.warn(
          ' %i/%i missing VHDs',
          missingVhds.length,
          linkedVhds.length
        )
        missingVhds.forEach(vhd => {
          console.warn(' ', vhd)
        })
        force && console.warn(' deleting…')
        console.warn('')
        force && (await handler.unlink(json))
      }
    }
  })

  // TODO: parallelize by vm/job/vdi
  const unusedVhdsDeletion = []
  {
    // VHD chains (as list from child to ancestor) to merge indexed by last
    // ancestor
    const vhdChainsToMerge = { __proto__: null }

    const toCheck = new Set(unusedVhds)

    // returns the chain (from used child up to `vhd`) when `vhd` has a used
    // descendant, otherwise schedules its deletion and returns undefined
    const getUsedChildChainOrDelete = vhd => {
      if (vhd in vhdChainsToMerge) {
        const chain = vhdChainsToMerge[vhd]
        delete vhdChainsToMerge[vhd]
        return chain
      }

      if (!unusedVhds.has(vhd)) {
        return [vhd]
      }

      // no longer needs to be checked
      toCheck.delete(vhd)

      const child = vhdChildren[vhd]
      if (child !== undefined) {
        const chain = getUsedChildChainOrDelete(child)
        if (chain !== undefined) {
          chain.push(vhd)
          return chain
        }
      }

      console.warn('Unused VHD', vhd)
      force && console.warn(' deleting…')
      console.warn('')
      force && unusedVhdsDeletion.push(handler.unlink(vhd))
    }

    toCheck.forEach(vhd => {
      vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
    })

    Object.keys(vhdChainsToMerge).forEach(key => {
      const chain = vhdChainsToMerge[key]
      if (chain !== undefined) {
        unusedVhdsDeletion.push(mergeVhdChain(chain))
      }
    })
  }

  // BUGFIX: `unusedVhdsDeletion` is an *array* of promises — passing it as a
  // single element of `Promise.all` would treat it as an already-resolved
  // value and never await the deletions/merges; spread it instead.
  await Promise.all([
    ...unusedVhdsDeletion,
    asyncMap(unusedXvas, path => {
      console.warn('Unused XVA', path)
      force && console.warn(' deleting…')
      console.warn('')
      return force && handler.unlink(path)
    }),
  ])
}
// -----------------------------------------------------------------------------

// Entry point: `clean-vms [--force] <vmDir>...`
//
// Without `--force` the command is a dry run which only reports problems;
// with it, broken and unused files are actually deleted/renamed.
module.exports = async function main(args) {
  const opts = getopts(args, {
    alias: {
      force: 'f',
    },
    boolean: ['force'],
    default: {
      force: false,
    },
  })

  // publish the flag to the module-level `force` used by the helpers above
  ;({ force } = opts)

  // all remaining positional arguments are VM directories, handled in parallel
  await asyncMap(opts._, async vmDir => {
    vmDir = resolve(vmDir)

    // TODO: implement this in `xo-server`, not easy because not compatible with
    // `@xen-orchestra/fs`.
    const release = await lockfile.lock(vmDir)
    try {
      await handleVm(vmDir)
    } catch (error) {
      // report and continue with the other VM dirs
      console.error('handleVm', vmDir, error)
    } finally {
      await release()
    }
  })
}

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env node

// CLI entry point: wires the available sub-commands into the dispatcher
// created by ./_composeCommands and runs it on the process arguments.
require('./_composeCommands')({
  'clean-vms': {
    // lazy getter: the command module is only loaded when actually invoked
    get main() {
      return require('./commands/clean-vms')
    },
    usage: '[--force] xo-vm-backups/*',
  },
})(process.argv.slice(2), 'xo-backups').catch(error => {
  console.error('main', error)
  process.exitCode = 1
})

View File

@@ -1,28 +0,0 @@
{
"bin": {
"xo-backups": "index.js"
},
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/fs": "^0.10.2",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.14.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.7.2"
},
"engines": {
"node": ">=7.10.1"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
"repository": {
"directory": "@xen-orchestra/backups-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.0.0"
}

View File

@@ -16,7 +16,7 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.27.3"
"xen-api": "^0.27.2"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/cron",
"version": "1.0.6",
"version": "1.0.4",
"license": "ISC",
"description": "Focused, well maintained, cron parser/scheduler",
"keywords": [
@@ -46,7 +46,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -5,21 +5,14 @@ import parse from './parse'
const MAX_DELAY = 2 ** 31 - 1
function nextDelay(schedule) {
const now = schedule._createDate()
return next(schedule._schedule, now) - now
}
class Job {
constructor(schedule, fn) {
let scheduledDate
const wrapper = () => {
const now = Date.now()
if (scheduledDate > now) {
// we're early, delay
//
// no need to check _isEnabled, we're just delaying the existing timeout
//
// see https://github.com/vatesfr/xen-orchestra/issues/4625
this._timeout = setTimeout(wrapper, scheduledDate - now)
return
}
this._isRunning = true
let result
@@ -39,9 +32,7 @@ class Job {
this._isRunning = false
if (this._isEnabled) {
const now = schedule._createDate()
scheduledDate = +next(schedule._schedule, now)
const delay = scheduledDate - now
const delay = nextDelay(schedule)
this._timeout =
delay < MAX_DELAY
? setTimeout(wrapper, delay)

View File

@@ -2,24 +2,12 @@
import { createSchedule } from './'
const wrap = value => () => value
describe('issues', () => {
let originalDateNow
beforeAll(() => {
originalDateNow = Date.now
})
afterAll(() => {
Date.now = originalDateNow
originalDateNow = undefined
})
test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise
const schedule = createSchedule('* * * * *')
const job = schedule.createJob(() => {
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
@@ -30,7 +18,6 @@ describe('issues', () => {
})
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(1)
@@ -48,8 +35,7 @@ describe('issues', () => {
let nCalls = 0
let resolve, promise
const schedule = createSchedule('* * * * *')
const job = schedule.createJob(() => {
const job = createSchedule('* * * * *').createJob(() => {
++nCalls
// eslint-disable-next-line promise/param-names
@@ -60,7 +46,6 @@ describe('issues', () => {
})
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(1)
@@ -71,7 +56,6 @@ describe('issues', () => {
resolve()
await promise
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(2)
})

View File

@@ -1,13 +1,13 @@
# @xen-orchestra/defined [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/defined):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save @xen-orchestra/defined
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -34,7 +34,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -62,10 +62,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -33,7 +33,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.10.2",
"version": "0.10.1",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -18,19 +18,19 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@marsaud/smb2": "^0.14.0",
"@sindresorhus/df": "^3.1.1",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^3.2.0",
"execa": "^1.0.0",
"fs-extra": "^8.0.1",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
@@ -46,7 +46,7 @@
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"

View File

@@ -389,7 +389,7 @@ export default class RemoteHandlerAbstract {
async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(randomBytes, SIZE)
const data = await fromCallback(cb => randomBytes(SIZE, cb))
let step = 'write'
try {
const writeStart = process.hrtime()

View File

@@ -86,7 +86,7 @@ handlers.forEach(url => {
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
@@ -106,7 +106,7 @@ handlers.forEach(url => {
describe('#createWriteStream()', () => {
testWithFileDescriptor('file', 'wx', async ({ file, flags }) => {
const stream = await handler.createWriteStream(file, { flags })
await fromCallback(pipeline, createTestDataStream(), stream)
await fromCallback(cb => pipeline(createTestDataStream(), stream, cb))
await expect(await handler.readFile('file')).toEqual(TEST_DATA)
})

View File

@@ -47,19 +47,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {
})
}
async _getInfo() {
// df.file() resolves with an object with the following properties:
// filesystem, type, size, used, available, capacity and mountpoint.
// size, used, available and capacity may be `NaN` so we remove any `NaN`
// value from the object.
const info = await df.file(this._getFilePath('/'))
Object.keys(info).forEach(key => {
if (Number.isNaN(info[key])) {
delete info[key]
}
})
return info
_getInfo() {
return df.file(this._getFilePath('/'))
}
async _getSize(file) {

View File

@@ -15,7 +15,7 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/log):
Everywhere something should be logged:
```js
import { createLogger } from '@xen-orchestra/log'
import createLogger from '@xen-orchestra/log'
const log = createLogger('my-module')
@@ -42,7 +42,6 @@ log.error('could not join server', {
Then, at application level, configure the logs are handled:
```js
import { createLogger } from '@xen-orchestra/log'
import { configure, catchGlobalErrors } from '@xen-orchestra/log/configure'
import transportConsole from '@xen-orchestra/log/transports/console'
import transportEmail from '@xen-orchestra/log/transports/email'
@@ -78,8 +77,8 @@ configure([
])
// send all global errors (uncaught exceptions, warnings, unhandled rejections)
// to this logger
catchGlobalErrors(createLogger('app'))
// to this transport
catchGlobalErrors(transport)
```
### Transports

View File

@@ -31,14 +31,14 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
},
@@ -48,7 +48,7 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,13 +1,13 @@
# @xen-orchestra/mixin [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/mixin):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save @xen-orchestra/mixin
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
ISC © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -36,7 +36,7 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-dev": "^1.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -28,7 +28,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -4,110 +4,6 @@
### Enhancements
- [Backup NG] Make report recipients configurable in the backup settings [#4581](https://github.com/vatesfr/xen-orchestra/issues/4581) (PR [#4646](https://github.com/vatesfr/xen-orchestra/pull/4646))
- [SAML] Setting to disable requested authentication context (helps with _Active Directory_) (PR [#4675](https://github.com/vatesfr/xen-orchestra/pull/4675))
- The default sign-in page can be configured via `authentication.defaultSignInPage` (PR [#4678](https://github.com/vatesfr/xen-orchestra/pull/4678))
- [SR] Allow import of VHD and VMDK disks [#4137](https://github.com/vatesfr/xen-orchestra/issues/4137) (PR [#4138](https://github.com/vatesfr/xen-orchestra/pull/4138) )
- [Host] Advanced Live Telemetry (PR [#4680](https://github.com/vatesfr/xen-orchestra/pull/4680))
### Bug fixes
- [Metadata backup] Add 10 minutes timeout to avoid stuck jobs [#4657](https://github.com/vatesfr/xen-orchestra/issues/4657) (PR [#4666](https://github.com/vatesfr/xen-orchestra/pull/4666))
- [Metadata backups] Fix out-of-date listing for 1 minute due to cache (PR [#4672](https://github.com/vatesfr/xen-orchestra/pull/4672))
- [Delta backup] Limit the number of merged deltas per run to avoid interrupted jobs (PR [#4674](https://github.com/vatesfr/xen-orchestra/pull/4674))
### Released packages
- vhd-lib v0.7.2
- xo-vmdk-to-vhd v0.1.8
- xo-server-auth-ldap v0.6.6
- xo-server-auth-saml v0.7.0
- xo-server-backup-reports v0.16.4
- @xen-orchestra/fs v0.10.2
- xo-server v5.53.0
- xo-web v5.53.1
## **5.40.2** (2019-11-22)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [Logs] Ability to report a bug with attached log (PR [#4201](https://github.com/vatesfr/xen-orchestra/pull/4201))
- [Backup] Reduce _VDI chain protection error_ occurrence by being more tolerant (configurable via `xo-server`'s `xapiOptions.maxUncoalescedVdis` setting) [#4124](https://github.com/vatesfr/xen-orchestra/issues/4124) (PR [#4651](https://github.com/vatesfr/xen-orchestra/pull/4651))
- [Plugin] [Web hooks](https://xen-orchestra.com/docs/web-hooks.html) [#1946](https://github.com/vatesfr/xen-orchestra/issues/1946) (PR [#3155](https://github.com/vatesfr/xen-orchestra/pull/3155))
- [Tables] Always put the tables' search in the URL [#4542](https://github.com/vatesfr/xen-orchestra/issues/4542) (PR [#4637](https://github.com/vatesfr/xen-orchestra/pull/4637))
### Bug fixes
- [SDN controller] Prevent private network creation on bond slave PIF (Fixes https://github.com/xcp-ng/xcp/issues/300) (PR [4633](https://github.com/vatesfr/xen-orchestra/pull/4633))
- [Metadata backup] Fix failed backup reported as successful [#4596](https://github.com/vatesfr/xen-orchestra/issues/4596) (PR [#4598](https://github.com/vatesfr/xen-orchestra/pull/4598))
- [Backup NG] Fix "task cancelled" error when the backup job timeout exceeds 596 hours [#4662](https://github.com/vatesfr/xen-orchestra/issues/4662) (PR [#4663](https://github.com/vatesfr/xen-orchestra/pull/4663))
- Fix `promise rejected with non-error` warnings in logs (PR [#4659](https://github.com/vatesfr/xen-orchestra/pull/4659))
### Released packages
- xo-server-web-hooks v0.1.0
- xen-api v0.27.3
- xo-server-backup-reports v0.16.3
- vhd-lib v0.7.1
- xo-server v5.52.1
- xo-web v5.52.0
## **5.40.1** (2019-10-29)
### Bug fixes
- [XOSAN] Fix "Install Cloud plugin" warning (PR [#4631](https://github.com/vatesfr/xen-orchestra/pull/4631))
### Released packages
- xo-web v5.51.1
## **5.40.0** (2019-10-29)
### Breaking changes
- `xo-server` requires Node 8.
### Highlights
- [Backup NG] Offline backup feature [#3449](https://github.com/vatesfr/xen-orchestra/issues/3449) (PR [#4470](https://github.com/vatesfr/xen-orchestra/pull/4470))
- [Menu] Remove legacy backup entry [#4467](https://github.com/vatesfr/xen-orchestra/issues/4467) (PR [#4476](https://github.com/vatesfr/xen-orchestra/pull/4476))
- [Hub] Ability to update existing template (PR [#4613](https://github.com/vatesfr/xen-orchestra/pull/4613))
- [Support] Ability to open and close support tunnel from the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4616](https://github.com/vatesfr/xen-orchestra/pull/4616))
### Enhancements
- [Hub] Ability to select SR in hub VM installation (PR [#4571](https://github.com/vatesfr/xen-orchestra/pull/4571))
- [Hub] Display more info about downloadable templates (PR [#4593](https://github.com/vatesfr/xen-orchestra/pull/4593))
- [xo-server-transport-icinga2] Add support of [icinga2](https://icinga.com/docs/icinga2/latest/doc/12-icinga2-api/) for reporting services status [#4563](https://github.com/vatesfr/xen-orchestra/issues/4563) (PR [#4573](https://github.com/vatesfr/xen-orchestra/pull/4573))
### Bug fixes
- [SR] Fix `[object HTMLInputElement]` name after re-attaching a SR [#4546](https://github.com/vatesfr/xen-orchestra/issues/4546) (PR [#4550](https://github.com/vatesfr/xen-orchestra/pull/4550))
- [Schedules] Prevent double runs [#4625](https://github.com/vatesfr/xen-orchestra/issues/4625) (PR [#4626](https://github.com/vatesfr/xen-orchestra/pull/4626))
- [Schedules] Properly enable/disable on config import (PR [#4624](https://github.com/vatesfr/xen-orchestra/pull/4624))
### Released packages
- @xen-orchestra/cron v1.0.6
- xo-server-transport-icinga2 v0.1.0
- xo-server-sdn-controller v0.3.1
- xo-server v5.51.1
- xo-web v5.51.0
### Dropped packages
- xo-server-cloud: this package was useless for OpenSource installations because it required a complete XOA environment
## **5.39.1** (2019-10-11)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Enhancements
- [Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
### Bug fixes
@@ -122,6 +18,8 @@
## **5.39.0** (2019-09-30)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Highlights
- [VM/console] Add a button to connect to the VM via the local SSH client (PR [#4415](https://github.com/vatesfr/xen-orchestra/pull/4415))
@@ -175,6 +73,8 @@
## **5.38.0** (2019-08-29)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Enhancements
- [VM/Attach disk] Display confirmation modal when VDI is already attached [#3381](https://github.com/vatesfr/xen-orchestra/issues/3381) (PR [#4366](https://github.com/vatesfr/xen-orchestra/pull/4366))

View File

@@ -7,12 +7,14 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
[Support] Ability to check the XOA on the user interface [#4513](https://github.com/vatesfr/xen-orchestra/issues/4513) (PR [#4574](https://github.com/vatesfr/xen-orchestra/pull/4574))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Host] Fix Enable Live Telemetry button state (PR [#4686](https://github.com/vatesfr/xen-orchestra/pull/4686))
- [Host] Fix Advanced Live Telemetry URL (PR [#4687](https://github.com/vatesfr/xen-orchestra/pull/4687))
- [VM/new-vm] Fix template selection on creating new VM for resource sets [#4565](https://github.com/vatesfr/xen-orchestra/issues/4565) (PR [#4568](https://github.com/vatesfr/xen-orchestra/pull/4568))
- [VM] Clearer invalid cores per socket error [#4120](https://github.com/vatesfr/xen-orchestra/issues/4120) (PR [#4187](https://github.com/vatesfr/xen-orchestra/pull/4187))
### Released packages
@@ -21,5 +23,5 @@
>
> Rule of thumb: add packages on top.
- xo-server v5.54.0
- xo-web v5.54.0
- xo-server v5.51.0
- xo-web v5.51.0

View File

@@ -51,7 +51,6 @@
* [Health](health.md)
* [Job manager](scheduler.md)
* [Alerts](alerts.md)
* [Web hooks](web-hooks.md)
* [Load balancing](load_balancing.md)
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)

View File

@@ -22,7 +22,7 @@ group = 'nogroup'
By default, XO-server listens on all addresses (0.0.0.0) and runs on port 80. If you need to, you can change this in the `# Basic HTTP` section:
```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 80
```
@@ -31,7 +31,7 @@ port = 80
XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:
```toml
hostname = '0.0.0.0'
host = '0.0.0.0'
port = 443
certificate = './certificate.pem'
key = './key.pem'
@@ -43,10 +43,10 @@ key = './key.pem'
If you want to redirect everything to HTTPS, you can modify the configuration like this:
```toml
```
# If set to true, all HTTP traffic will be redirected to the first HTTPs configuration.
redirectToHttps = true
redirectToHttps: true
```
This should be written just before the `mount` option, inside the `http:` block.

View File

@@ -20,7 +20,7 @@ We'll consider at this point that you've got a working node on your box. E.g:
```
$ node -v
v8.16.2
v8.12.0
```
If not, see [this page](https://nodejs.org/en/download/package-manager/) for instructions on how to install Node.
@@ -65,13 +65,17 @@ Now you have to create a config file for `xo-server`:
```
$ cd packages/xo-server
$ mkdir -p ~/.config/xo-server
$ cp sample.config.toml ~/.config/xo-server/config.toml
$ cp sample.config.toml .xo-server.toml
```
> Note: If you're installing `xo-server` as a global service, you may want to copy the file to `/etc/xo-server/config.toml` instead.
Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory):
In this config file, you can change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
```toml
[http.mounts]
'/' = '../xo-web/dist/'
```
In this config file, you can also change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
You can try to start xo-server to see if it works. You should have something like this:
@@ -182,7 +186,7 @@ service redis start
## SUDO
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server` configuration file and setting `useSudo = true`. It's near the end of the file:
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server/.xo-server.toml` and setting `useSudo = true`. It's near the end of the file:
```
useSudo = true

View File

@@ -1,72 +0,0 @@
# Web hooks
⚠ This feature is experimental!
## Configuration
The plugin "web-hooks" needs to be installed and loaded for this feature to work.
You can trigger an HTTP POST request to a URL when a Xen Orchestra API method is called.
* Go to Settings > Plugins > Web hooks
* Add new hooks
* For each hook, configure:
* Method: the XO API method that will trigger the HTTP request when called
* Type:
* pre: the request will be sent when the method is called
* post: the request will be sent after the method action is completed
* pre/post: both
* URL: the full URL which the requests will be sent to
* Save the plugin configuration
From now on, a request will be sent to the corresponding URLs when a configured method is called by an XO client.
## Request content
```
POST / HTTP/1.1
Content-Type: application/json
```
The request's body is a JSON string representing an object with the following properties:
- `type`: `"pre"` or `"post"`
- `callId`: unique ID for this call to help match a pre-call and a post-call
- `userId`: unique internal ID of the user who performed the call
- `userName`: login/e-mail address of the user who performed the call
- `method`: name of the method that was called (e.g. `"vm.start"`)
- `params`: call parameters (object)
- `timestamp`: epoch timestamp of the beginning ("pre") or end ("post") of the call in ms
- `duration`: duration of the call in ms ("post" hooks only)
- `result`: call result on success ("post" hooks only)
- `error`: call result on error ("post" hooks only)
## Request handling
*Quick Node.js example of how you may want to handle the requests*
```js
const http = require('http')
const { exec } = require('child_process')
http
.createServer((req, res) => {
let body = ''
req.on('data', chunk => {
body += chunk
})
req.on('end', () => handleHook(body))
res.end()
})
.listen(3000)
const handleHook = data => {
const { method, params, type, result, error, timestamp } = JSON.parse(data)
// Log it
console.log(`${new Date(timestamp).toISOString()} [${method}|${type}] ${params}${result || error}`)
// Run scripts
exec(`./hook-scripts/${method}-${type}.sh`)
}
```

View File

@@ -22,9 +22,9 @@ For use on huge infrastructure (more than 500+ VMs), feel free to increase the R
### The quickest way
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go to https://xen-orchestra.com/#!/xoa and follow the instructions.
The **fastest and most secure way** to install Xen Orchestra is to use our web deploy page. Go on https://xen-orchestra.com/#!/xoa and follow instructions.
> **Note:** no data will be sent to our servers, the deployment only runs between your browser and your host!
> **Note:** no data will be sent to our servers, it's running only between your browser and your host!
![](./assets/deploy_form.png)
@@ -41,12 +41,12 @@ bash -c "$(curl -s http://xoa.io/deploy)"
Follow the instructions:
* Your IP configuration will be requested: it's set to **DHCP by default**, otherwise you can enter a fixed IP address (eg `192.168.0.10`)
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS server should be provided.
* If DHCP is selected, the script will continue automatically. Otherwise a netmask, gateway, and DNS should be provided.
* XOA will be deployed on your default storage repository. You can move it elsewhere anytime after.
### Via a manual XVA download
### Via download the XVA
You can also download XOA from xen-orchestra.com in an XVA file. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
Download XOA from xen-orchestra.com. Once you've got the XVA file, you can import it with `xe vm-import filename=xoa_unified.xva` or via XenCenter.
After the VM is imported, you just need to start it with `xe vm-start vm="XOA"` or with XenCenter.
@@ -64,35 +64,6 @@ Once you have started the VM, you can access the web UI by putting the IP you co
**The first thing** you need to do with your XOA is register. [Read the documentation on the page dedicated to the updater/register interface](updater.md).
## Technical Support
In your appliance, you can access the support section in the XOA menu. In this section you can:
* launch an `xoa check` command
![](https://xen-orchestra.com/blog/content/images/2019/10/xoacheck.png)
* Open a secure support tunnel so our team can remotely investigate
![](https://user-images.githubusercontent.com/10992860/67384755-10f47f80-f592-11e9-974d-bbdefd0bf353.gif)
<a id="ssh-pro-support"></a>
If your web UI is not working, you can also open the secure support tunnel from the CLI. To open a private tunnel (we are the only ones with the private key), you can use the command `xoa support tunnel` like below:
```
$ xoa support tunnel
The support tunnel has been created.
Do not stop this command before the intervention is over!
Give this id to the support: 40713
```
Give us this number, and we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
> The tunnel utilizes the user `xoa-support`. If you want to deactivate this bundled user, you can run `chage -E 0 xoa-support`. To re-activate this account, you must run `chage -E 1 xoa-support`.
### First console connection
If you connect via SSH or console, the default credentials are:
@@ -185,6 +156,21 @@ You can access the VM console through XenCenter or using VNC through a SSH tunne
If you want to go back in DHCP, just run `xoa network dhcp`
### SSH Pro Support
By default, if you need support, there is a dedicated user named `xoa-support`. We are the only one with the private key. If you want our assistance on your XOA, you can open a private tunnel with the command `xoa support tunnel` like below:
```
$ xoa support tunnel
The support tunnel has been created.
Do not stop this command before the intervention is over!
Give this id to the support: 40713
```
Give us this number, we'll be able to access your XOA in a secure manner. Then, close the tunnel with `Ctrl+C` after your issue has been solved by support.
> If you want to deactivate this bundled user, you can type `chage -E 0 xoa-support`. To re-activate this account, you must use the `chage -E 1 xoa-support`.
### Firewall

View File

@@ -12,18 +12,18 @@
"eslint-config-standard-jsx": "^8.1.0",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^10.0.0",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.112.0",
"flow-bin": "^0.106.3",
"globby": "^10.0.0",
"husky": "^3.0.0",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"sorted-object": "^2.0.1"
},
"engines": {
@@ -60,7 +60,6 @@
"posttest": "scripts/run-script test",
"prepare": "scripts/run-script prepare",
"pretest": "eslint --ignore-path .gitignore .",
"prettify": "prettier --ignore-path .gitignore --write '**/*.{js,jsx,md,mjs,ts,tsx}'",
"test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
"test-integration": "jest \".integ\\.spec\\.js$\"",
"travis-tests": "scripts/travis-tests"

View File

@@ -35,7 +35,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.1",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -33,7 +33,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -24,25 +24,25 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.2",
"@xen-orchestra/fs": "^0.10.1",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.7.2"
"vhd-lib": "^0.7.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"execa": "^3.2.0",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"rimraf": "^3.0.0",
"tmp": "^0.1.0"
},

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-lib",
"version": "0.7.2",
"version": "0.7.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -18,17 +18,15 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -37,10 +35,10 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.2",
"@xen-orchestra/fs": "^0.10.1",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"execa": "^3.2.0",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"index-modules": "^0.3.0",

View File

@@ -17,7 +17,10 @@ export default async function readChunk(stream, n) {
resolve(Buffer.concat(chunks, i))
}
const onEnd = resolve2
function onEnd() {
resolve2()
clean()
}
function onError(error) {
reject(error)
@@ -31,11 +34,8 @@ export default async function readChunk(stream, n) {
}
i += chunk.length
chunks.push(chunk)
if (i === n) {
if (i >= n) {
resolve2()
} else if (i > n) {
throw new RangeError(`read (${i}) more than expected (${n})`)
}
}

View File

@@ -29,13 +29,13 @@ export default asyncIteratorToStream(async function*(size, blockParser) {
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.logicalAddressBytes - position
const paddingLength = next.offsetBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield* filePadding(paddingLength)
yield next.data
position = next.logicalAddressBytes + next.data.length
position = next.offsetBytes + next.data.length
}
yield* filePadding(actualSize - position)
yield footer

View File

@@ -1,6 +1,5 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forEachRight } from 'lodash'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
@@ -18,65 +17,38 @@ import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
* Looks once backwards to collect the last fragment of each VHD block (they could be interleaved),
* then allocates the blocks in a forwards pass.
* @returns currentVhdPositionSector the first free sector after the data
*/
function createBAT(
firstBlockPosition,
fragmentLogicAddressList,
blockAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
const lastFragmentPerBlock = new Map()
forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(
fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
)
if (!lastFragmentPerBlock.has(vhdTableIndex)) {
lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
})
const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
// lastFragmentPerBlock is from last to first, so we go the other way around
forEachRight(
lastFragmentPerBlockArray,
([vhdTableIndex, _fragmentVirtualAddress]) => {
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
}
)
return [currentVhdPositionSector, lastFragmentPerBlock]
return currentVhdPositionSector
}
/**
* Receives an iterator of constant sized fragments, and a list of their address in virtual space, and returns
* a stream representing the VHD file of this disk.
* The fragment size should be an integer divider of the VHD block size.
* "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
* @param diskSize
* @param fragmentSize
* @param fragmentLogicalAddressList
* @param fragmentIterator
* @returns {Promise<Function>}
*/
export default async function createReadableStream(
diskSize,
fragmentSize,
fragmentLogicalAddressList,
fragmentIterator
incomingBlockSize,
blockAddressList,
blockIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${fragmentSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
`Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
@@ -108,72 +80,60 @@ export default async function createReadableStream(
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
const [endOfData, lastFragmentPerBlock] = createBAT(
const endOfData = createBAT(
firstBlockPosition,
fragmentLogicalAddressList,
blockAddressList,
ratio,
bat,
bitmapSize
)
const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
let position = 0
function* yieldAndTrack(buffer, expectedPosition, reason) {
function* yieldAndTrack(buffer, expectedPosition) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition, reason)
assert.strictEqual(position, expectedPosition)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
function insertFragmentInBlock(fragment, blockWithBitmap) {
const fragmentOffsetInBlock =
(fragment.logicalAddressBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(blockWithBitmap, fragmentOffsetInBlock + bitPos)
}
fragment.data.copy(
blockWithBitmap,
bitmapSize + (fragment.logicalAddressBytes % VHD_BLOCK_SIZE_BYTES)
)
}
async function* generateBlocks(fragmentIterator, bitmapSize) {
let currentFragmentIndex = -1
// store blocks waiting for some of their fragments.
const batIndexToBlockMap = new Map()
for await (const fragment of fragmentIterator) {
currentFragmentIndex++
const batIndex = Math.floor(
fragment.logicalAddressBytes / VHD_BLOCK_SIZE_BYTES
)
let currentBlockWithBitmap = batIndexToBlockMap.get(batIndex)
if (currentBlockWithBitmap === undefined) {
async function* generateFileContent(blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield* yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
batIndexToBlockMap.set(batIndex, currentBlockWithBitmap)
currentVhdBlockIndex = batIndex
}
insertFragmentInBlock(fragment, currentBlockWithBitmap)
const batEntry = bat.readUInt32BE(batIndex * 4)
assert.notStrictEqual(batEntry, BLOCK_UNUSED)
const batPosition = batEntry * SECTOR_SIZE
if (lastFragmentPerBlock.get(batIndex) === fragment.logicalAddressBytes) {
batIndexToBlockMap.delete(batIndex)
yield* yieldAndTrack(
currentBlockWithBitmap,
batPosition,
`VHD block start index: ${currentFragmentIndex}`
)
const blockOffset =
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + (next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
)
}
yield* yieldAndTrack(currentBlockWithBitmap)
}
async function* iterator() {
yield* yieldAndTrack(footer, 0)
yield* yieldAndTrack(header, FOOTER_SIZE)
yield* yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield* generateBlocks(fragmentIterator, bitmapSize)
yield* generateFileContent(blockIterator, bitmapSize, ratio)
yield* yieldAndTrack(footer)
}

View File

@@ -1,5 +1,4 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { createLogger } from '@xen-orchestra/log'
import resolveRelativeFromFile from './_resolveRelativeFromFile'
@@ -14,23 +13,18 @@ import {
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const { warn } = createLogger('vhd-lib:createSyntheticStream')
export default async function createSyntheticStream(handler, paths) {
export default async function createSyntheticStream(handler, path) {
const fds = []
const cleanup = () => {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
warn('error while closing file', {
error,
fd: fds[i],
})
console.warn('createReadStream, closeFd', i, error)
})
}
}
try {
const vhds = []
const open = async path => {
while (true) {
const fd = await handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
@@ -38,18 +32,11 @@ export default async function createSyntheticStream(handler, paths) {
await vhd.readHeaderAndFooter()
await vhd.readBlockAllocationTable()
return vhd
}
if (typeof paths === 'string') {
let path = paths
let vhd
while ((vhd = await open(path)).footer.diskType !== DISK_TYPE_DYNAMIC) {
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
} else {
for (const path of paths) {
await open(path)
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length

View File

@@ -6,8 +6,11 @@ export { default as chainVhd } from './chain'
export { default as checkVhdChain } from './checkChain'
export { default as createContentStream } from './createContentStream'
export { default as createReadableRawStream } from './createReadableRawStream'
export { default as createReadableSparseStream } from './createReadableSparseStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
export { default as createVhdStreamWithLength } from './createVhdStreamWithLength'
export { default as peekFooterFromVhdStream } from './peekFooterFromVhdStream'
export {
default as createVhdStreamWithLength,
} from './createVhdStreamWithLength'

View File

@@ -1,10 +0,0 @@
import readChunk from './_readChunk'
import { FOOTER_SIZE } from './_constants'
import { fuFooter } from './_structs'
export default async function peekFooterFromStream(stream) {
const footerBuffer = await readChunk(stream, FOOTER_SIZE)
const footer = fuFooter.unpack(footerBuffer)
stream.unshift(footerBuffer)
return footer
}

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import { createLogger } from '@xen-orchestra/log'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
@@ -16,7 +15,10 @@ import {
SECTOR_SIZE,
} from './_constants'
const { debug } = createLogger('vhd-lib:Vhd')
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//
@@ -38,11 +40,9 @@ const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
assert.strictEqual(
actual,
expected,
`invalid ${name} checksum ${actual}, expected ${expected}`
)
if (actual !== expected) {
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
}
}
// unused block as buffer containing a uint32BE
@@ -102,7 +102,7 @@ export default class Vhd {
}
// Returns the first address after metadata. (In bytes)
_getEndOfHeaders() {
getEndOfHeaders() {
const { header } = this
let end = FOOTER_SIZE + HEADER_SIZE
@@ -127,8 +127,8 @@ export default class Vhd {
}
// Returns the first sector after data.
_getEndOfData() {
let end = Math.ceil(this._getEndOfHeaders() / SECTOR_SIZE)
getEndOfData() {
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
@@ -309,8 +309,8 @@ export default class Vhd {
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async _createBlock(blockId) {
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
async createBlock(blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
@@ -325,7 +325,7 @@ export default class Vhd {
}
// Write a bitmap at a block address.
async _writeBlockBitmap(blockAddr, bitmap) {
async writeBlockBitmap(blockAddr, bitmap) {
const { bitmapSize } = this
if (bitmap.length !== bitmapSize) {
@@ -342,20 +342,20 @@ export default class Vhd {
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async _writeEntireBlock(block) {
async writeEntireBlock(block) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this._createBlock(block.id)
blockAddr = await this.createBlock(block.id)
}
await this._write(block.buffer, sectorsToBytes(blockAddr))
}
async _writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
async writeBlockSectors(block, beginSectorId, endSectorId, parentBitmap) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this._createBlock(block.id)
blockAddr = await this.createBlock(block.id)
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
} else if (parentBitmap === undefined) {
parentBitmap = (await this._readBlock(block.id, true)).bitmap
@@ -364,14 +364,14 @@ export default class Vhd {
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(
`_writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
)
for (let i = beginSectorId; i < endSectorId; ++i) {
mapSetBit(parentBitmap, i)
}
await this._writeBlockBitmap(blockAddr, parentBitmap)
await this.writeBlockBitmap(blockAddr, parentBitmap)
await this._write(
block.data.slice(
sectorsToBytes(beginSectorId),
@@ -407,12 +407,12 @@ export default class Vhd {
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this._writeEntireBlock(block)
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
await this.writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
@@ -429,7 +429,7 @@ export default class Vhd {
const rawFooter = fuFooter.pack(footer)
const eof = await this._handler.getSize(this._path)
// sometimes the file is longer than anticipated, we still need to put the footer at the end
const offset = Math.max(this._getEndOfData(), eof - rawFooter.length)
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
footer.checksum = checksumStruct(rawFooter, fuFooter)
debug(
@@ -500,7 +500,7 @@ export default class Vhd {
endInBuffer
)
}
await this._writeBlockSectors(
await this.writeBlockSectors(
{ id: currentBlock, data: inputBuffer },
offsetInBlockSectors,
endInBlockSectors
@@ -509,7 +509,7 @@ export default class Vhd {
await this.writeFooter()
}
async _ensureSpaceForParentLocators(neededSectors) {
async ensureSpaceForParentLocators(neededSectors) {
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
@@ -528,7 +528,7 @@ export default class Vhd {
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this._ensureSpaceForParentLocators(dataSpaceSectors)
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace =
dataSpaceSectors * SECTOR_SIZE

View File

@@ -31,11 +31,11 @@ test('createFooter() does not crash', () => {
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
logicalAddressBytes: 100,
offsetBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 700,
offsetBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -62,11 +62,11 @@ test('ReadableRawVHDStream does not crash', async () => {
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
logicalAddressBytes: 700,
offsetBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: 100,
offsetBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
@@ -97,11 +97,11 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
logicalAddressBytes: blockSize * 3,
offsetBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
logicalAddressBytes: blockSize * 100,
offsetBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
@@ -109,7 +109,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const stream = await createReadableSparseStream(
fileSize,
blockSize,
blocks.map(b => b.logicalAddressBytes),
blocks.map(b => b.offsetBytes),
blocks
)
expect(stream.length).toEqual(4197888)
@@ -128,7 +128,7 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.logicalAddressBytes)
b.data.copy(expected, b.offsetBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

View File

@@ -36,19 +36,19 @@
},
"dependencies": {
"archy": "^1.0.0",
"chalk": "^3.0.0",
"chalk": "^2.3.2",
"exec-promise": "^0.7.0",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.27.3"
"xen-api": "^0.27.2"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
"@babel/core": "^7.1.5",
"@babel/preset-env": "^7.1.5",
"babel-plugin-lodash": "^3.2.11",
"cross-env": "^6.0.3",
"cross-env": "^5.1.4",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -4,7 +4,6 @@ process.env.DEBUG = '*'
const defer = require('golike-defer').default
const { CancelToken } = require('promise-toolbox')
const { createVhdStreamWithLength } = require('vhd-lib')
const { createClient } = require('../')
@@ -33,13 +32,8 @@ defer(async ($defer, args) => {
const { cancel, token } = CancelToken.source()
process.on('SIGINT', cancel)
let input = createInputStream(args[2])
if (!raw && input.length === undefined) {
input = await createVhdStreamWithLength(input)
}
// https://xapi-project.github.io/xen-api/snapshots.html#uploading-a-disk-or-snapshot
await xapi.putResource(token, input, '/import_raw_vdi/', {
await xapi.putResource(token, createInputStream(args[2]), '/import_raw_vdi/', {
query: {
format: raw ? 'raw' : 'vhd',
vdi: await resolveRef(xapi, 'VDI', args[1])

View File

@@ -2,28 +2,6 @@
"requires": true,
"lockfileVersion": 1,
"dependencies": {
"@xen-orchestra/log": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/@xen-orchestra/log/-/log-0.2.0.tgz",
"integrity": "sha512-xNseJ/TIUdASm9uxr0zVvg8qDG+Xw6ycJy4dag+e1yl6pEr77GdPJD2R0JbE1BbZwup/Skh3TEh6L0GV+9NRdQ==",
"requires": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
}
},
"async-iterator-to-stream": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/async-iterator-to-stream/-/async-iterator-to-stream-1.1.0.tgz",
"integrity": "sha512-ddF3u7ipixenFJsYCKqVR9tNdkIzd2j7JVg8QarqkfUl7UTR7nhJgc1Q+3ebP/5DNFhV9Co9F47FJjGpdc0PjQ==",
"requires": {
"readable-stream": "^3.0.5"
}
},
"core-js": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/core-js/-/core-js-3.4.1.tgz",
"integrity": "sha512-KX/dnuY/J8FtEwbnrzmAjUYgLqtk+cxM86hfG60LGiW3MmltIc2yAmDgBgEkfm0blZhUrdr1Zd84J2Y14mLxzg=="
},
"core-util-is": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
@@ -46,41 +24,6 @@
"node-gyp-build": "^3.7.0"
}
},
"from2": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
"integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
"requires": {
"inherits": "^2.0.1",
"readable-stream": "^2.0.0"
},
"dependencies": {
"readable-stream": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
"integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
"requires": {
"core-util-is": "~1.0.0",
"inherits": "~2.0.3",
"isarray": "~1.0.0",
"process-nextick-args": "~2.0.0",
"safe-buffer": "~5.1.1",
"string_decoder": "~1.1.1",
"util-deprecate": "~1.0.1"
}
}
}
},
"fs-extra": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
"integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
"requires": {
"graceful-fs": "^4.2.0",
"jsonfile": "^4.0.0",
"universalify": "^0.1.0"
}
},
"getopts": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/getopts/-/getopts-2.2.5.tgz",
@@ -91,11 +34,6 @@
"resolved": "https://registry.npmjs.org/golike-defer/-/golike-defer-0.4.1.tgz",
"integrity": "sha512-x8cq/Fvu32T8cnco3CBDRF+/M2LFmfSIysKfecX09uIK3cFdHcEKBTPlPnEO6lwrdxfjkOIU6dIw3EIlEJeS1A=="
},
"graceful-fs": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
"integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
},
"human-format": {
"version": "0.10.1",
"resolved": "https://registry.npmjs.org/human-format/-/human-format-0.10.1.tgz",
@@ -111,24 +49,6 @@
"resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
"integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
},
"jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
"integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
"requires": {
"graceful-fs": "^4.1.6"
}
},
"limit-concurrency-decorator": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/limit-concurrency-decorator/-/limit-concurrency-decorator-0.4.0.tgz",
"integrity": "sha512-hXGTuCkYjosfHT1D7dcPKzPHSGwBtZfN0wummzDwxi5A3ZUNBB75qM8phKEjQGlQGAfYrMW/JqhbaljO3xOH0A=="
},
"lodash": {
"version": "4.17.15",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
"integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
},
"make-error": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
@@ -221,11 +141,6 @@
"safe-buffer": "~5.1.0"
}
},
"struct-fu": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/struct-fu/-/struct-fu-1.2.1.tgz",
"integrity": "sha512-QrtfoBRe+RixlBJl852/Gu7tLLTdx3kWs3MFzY1OHNrSsYYK7aIAnzqsncYRWrKGG/QSItDmOTlELMxehw4Gjw=="
},
"throttle": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/throttle/-/throttle-1.0.3.tgz",
@@ -260,47 +175,11 @@
}
}
},
"universalify": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
},
"util-deprecate": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
"uuid": {
"version": "3.3.3",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz",
"integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ=="
},
"vhd-lib": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/vhd-lib/-/vhd-lib-0.7.1.tgz",
"integrity": "sha512-TODzo7KjtNzYF/NuJjE5bPeGyXZIUzAOVJvED1dcPXr8iSnS6/U5aNdtKahBVwukEzf0/x+Cu3GMYutV4/cxsQ==",
"requires": {
"@xen-orchestra/log": "^0.2.0",
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.14.0",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
"dependencies": {
"promise-toolbox": {
"version": "0.14.0",
"resolved": "https://registry.npmjs.org/promise-toolbox/-/promise-toolbox-0.14.0.tgz",
"integrity": "sha512-VV5lXK4lXaPB9oBO50ope1qd0AKN8N3nK14jYvV9/qFmfZW2Px/bJjPZBniGjXcIJf6J5Y/coNgJtPHDyiUV/g==",
"requires": {
"make-error": "^1.3.2"
}
}
}
},
"xtend": {
"version": "4.0.2",
"resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",

View File

@@ -7,7 +7,6 @@
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^0.7.2"
"throttle": "^1.0.3"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.27.3",
"version": "0.27.2",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -46,7 +46,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -60,7 +60,7 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -8,7 +8,7 @@ import execPromise from 'exec-promise'
import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find } from 'lodash'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
@@ -110,7 +110,7 @@ const main = async args => {
asCallback.call(
fromCallback(cb => {
evaluate.call(repl, cmd, context, filename, cb)
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
}).then(value => (isArray(value) ? Promise.all(value) : value)),
cb
)
})(repl.eval)

View File

@@ -4,7 +4,7 @@ import kindOf from 'kindof'
import ms from 'ms'
import httpRequest from 'http-request-plus'
import { EventEmitter } from 'events'
import { map, noop, omit } from 'lodash'
import { isArray, map, noop, omit } from 'lodash'
import {
cancelable,
defer,
@@ -25,6 +25,7 @@ import isReadOnlyCall from './_isReadOnlyCall'
import makeCallSetting from './_makeCallSetting'
import parseUrl from './_parseUrl'
import replaceSensitiveValues from './_replaceSensitiveValues'
import XapiError from './_XapiError'
// ===================================================================
@@ -112,7 +113,7 @@ export class Xapi extends EventEmitter {
this._watchedTypes = undefined
const { watchEvents } = opts
if (watchEvents !== false) {
if (Array.isArray(watchEvents)) {
if (isArray(watchEvents)) {
this._watchedTypes = watchEvents
}
this.watchEvents()
@@ -625,7 +626,9 @@ export class Xapi extends EventEmitter {
kindOf(result)
)
return result
} catch (error) {
} catch (e) {
const error = e instanceof Error ? e : XapiError.wrap(e)
// do not log the session ID
//
// TODO: should log at the session level to avoid logging sensitive
@@ -740,9 +743,9 @@ export class Xapi extends EventEmitter {
// the event loop in that case
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (
await this._interruptOnDisconnect(this._call('system.listMethods'))
)
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
@@ -1072,7 +1075,7 @@ export class Xapi extends EventEmitter {
const $field = (field in RESERVED_FIELDS ? '$$' : '$') + field
const value = data[field]
if (Array.isArray(value)) {
if (isArray(value)) {
if (value.length === 0 || isOpaqueRef(value[0])) {
getters[$field] = function() {
const value = this[field]

View File

@@ -1,8 +1,6 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'
import XapiError from '../_XapiError'
import UnsupportedTransport from './_UnsupportedTransport'
// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
@@ -32,7 +30,7 @@ export default ({ allowUnauthorized, url }) => {
return response.result
}
throw XapiError.wrap(response.error)
throw response.error
},
error => {
if (error.response !== undefined) {

View File

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import XapiError from '../_XapiError'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'
@@ -35,7 +33,7 @@ const parseResult = result => {
}
if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}
const value = result.Value

View File

@@ -1,8 +1,6 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'
import XapiError from '../_XapiError'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
const logError = error => {
@@ -28,7 +26,7 @@ const parseResult = result => {
}
if (status !== 'Success') {
throw XapiError.wrap(result.ErrorDescription)
throw result.ErrorDescription
}
return result.Value

View File

@@ -26,28 +26,28 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "^7.0.0",
"bluebird": "^3.5.1",
"chalk": "^3.0.0",
"chalk": "^2.2.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
"micromatch": "^4.0.2",
"micromatch": "^3.1.3",
"mkdirp": "^0.5.1",
"nice-pipe": "0.0.0",
"pretty-ms": "^5.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^3.0.0",
"xdg-basedir": "^4.0.0",
"strip-indent": "^2.0.0",
"xdg-basedir": "^3.0.0",
"xo-lib": "^0.9.0"
},
"devDependencies": {
@@ -56,7 +56,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -7,6 +7,7 @@ const promisify = require('bluebird').promisify
const readFile = promisify(require('fs').readFile)
const writeFile = promisify(require('fs').writeFile)
const assign = require('lodash/assign')
const l33t = require('l33teral')
const mkdirp = promisify(require('mkdirp'))
const xdgBasedir = require('xdg-basedir')
@@ -40,7 +41,7 @@ const save = (exports.save = function(config) {
exports.set = function(data) {
return load().then(function(config) {
return save(Object.assign(config, data))
return save(assign(config, data))
})
}

View File

@@ -17,6 +17,7 @@ const getKeys = require('lodash/keys')
const hrp = require('http-request-plus').default
const humanFormat = require('human-format')
const identity = require('lodash/identity')
const isArray = require('lodash/isArray')
const isObject = require('lodash/isObject')
const micromatch = require('micromatch')
const nicePipe = require('nice-pipe')
@@ -297,11 +298,7 @@ async function listCommands(args) {
str.push(
name,
'=<',
type == null
? 'unknown type'
: Array.isArray(type)
? type.join('|')
: type,
type == null ? 'unknown type' : isArray(type) ? type.join('|') : type,
'>'
)
@@ -386,7 +383,7 @@ async function call(args) {
printProgress
)
return fromCallback(pump, response, progress, output)
return fromCallback(cb => pump(response, progress, output, cb))
}
if (key === '$sendTo') {

View File

@@ -34,7 +34,7 @@
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"rimraf": "^3.0.0"
},

View File

@@ -260,10 +260,7 @@ describe('Collection', function() {
forEach(
{
'add & update → add': [
[
['add', 'foo', 0],
['update', 'foo', 1],
],
[['add', 'foo', 0], ['update', 'foo', 1]],
{
add: {
foo: 1,
@@ -271,19 +268,10 @@ describe('Collection', function() {
},
],
'add & remove → ∅': [
[
['add', 'foo', 0],
['remove', 'foo'],
],
{},
],
'add & remove → ∅': [[['add', 'foo', 0], ['remove', 'foo']], {}],
'update & update → update': [
[
['update', 'bar', 1],
['update', 'bar', 2],
],
[['update', 'bar', 1], ['update', 'bar', 2]],
{
update: {
bar: 2,
@@ -292,10 +280,7 @@ describe('Collection', function() {
],
'update & remove → remove': [
[
['update', 'bar', 1],
['remove', 'bar'],
],
[['update', 'bar', 1], ['remove', 'bar']],
{
remove: {
bar: undefined,
@@ -304,10 +289,7 @@ describe('Collection', function() {
],
'remove & add → update': [
[
['remove', 'bar'],
['add', 'bar', 0],
],
[['remove', 'bar'], ['add', 'bar', 0]],
{
update: {
bar: 0,

View File

@@ -1,4 +1,4 @@
import iteratee from 'lodash/iteratee'
import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import isEmpty from './is-empty'
@@ -17,9 +17,9 @@ export default class Index {
this._keysToHash = Object.create(null)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
}
// This method is used to compute the hash under which an item must

View File

@@ -1,4 +1,4 @@
import iteratee from 'lodash/iteratee'
import { bind, iteratee } from 'lodash'
import clearObject from './clear-object'
import NotImplemented from './not-implemented'
@@ -16,9 +16,9 @@ export default class UniqueIndex {
this._keysToHash = Object.create(null)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
}
// This method is used to compute the hash under which an item must

View File

@@ -1,5 +1,4 @@
import createCallback from 'lodash/iteratee'
import forEach from 'lodash/forEach'
import { bind, forEach, iteratee as createCallback } from 'lodash'
import Collection, {
ACTION_ADD,
@@ -20,9 +19,9 @@ export default class View extends Collection {
this._onAdd(this._collection.all)
// Bound versions of listeners.
this._onAdd = this._onAdd.bind(this)
this._onUpdate = this._onUpdate.bind(this)
this._onRemove = this._onRemove.bind(this)
this._onAdd = bind(this._onAdd, this)
this._onUpdate = bind(this._onUpdate, this)
this._onRemove = bind(this._onRemove, this)
// Register listeners.
this._collection.on(ACTION_ADD, this._onAdd)

View File

@@ -36,7 +36,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,5 +1,5 @@
import { BaseError } from 'make-error'
import { iteratee } from 'lodash'
import { isArray, iteratee } from 'lodash'
class XoError extends BaseError {
constructor({ code, message, data }) {
@@ -77,7 +77,7 @@ export const serverUnreachable = create(9, objectId => ({
}))
export const invalidParameters = create(10, (message, errors) => {
if (Array.isArray(message)) {
if (isArray(message)) {
errors = message
message = undefined
}

View File

@@ -46,7 +46,7 @@
"@types/node": "^12.0.2",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^9.0.0",
"tslint-config-standard": "^8.0.1",
"typescript": "^3.1.6"
},
"scripts": {

View File

@@ -41,7 +41,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,13 +1,13 @@
# xo-remote-parser [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
# ${pkg.name} [![Build Status](https://travis-ci.org/${pkg.shortGitHubPath}.png?branch=master)](https://travis-ci.org/${pkg.shortGitHubPath})
> ${pkg.description}
## Install
Installation of the [npm package](https://npmjs.org/package/xo-remote-parser):
Installation of the [npm package](https://npmjs.org/package/${pkg.name}):
```
> npm install --save xo-remote-parser
> npm install --save ${pkg.name}
```
## Usage
@@ -40,10 +40,10 @@ the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
- report any [issue](${pkg.bugs})
you've encountered;
- fork and create a pull request.
## License
AGPL-3.0 © [Vates SAS](https://vates.fr)
${pkg.license} © [${pkg.author.name}](${pkg.author.url})

View File

@@ -32,7 +32,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"deep-freeze": "^0.0.1",
"rimraf": "^3.0.0"
},

View File

@@ -40,7 +40,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.6.6",
"version": "0.6.5",
"license": "AGPL-3.0",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -39,14 +39,14 @@
"inquirer": "^7.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,7 +1,7 @@
/* eslint no-throw-literal: 0 */
import eventToPromise from 'event-to-promise'
import noop from 'lodash/noop'
import { bind, noop } from 'lodash'
import { createClient } from 'ldapjs'
import { escape } from 'ldapjs/lib/filters/escape'
import { promisify } from 'promise-toolbox'
@@ -9,11 +9,6 @@ import { readFile } from 'fs'
// ===================================================================
const DEFAULTS = {
checkCertificate: true,
filter: '(uid={{name}})',
}
const VAR_RE = /\{\{([^}]+)\}\}/g
const evalFilter = (filter, vars) =>
filter.replace(VAR_RE, (_, name) => {
@@ -48,7 +43,7 @@ If not specified, it will use a default set of well-known CAs.
description:
"Enforce the validity of the server's certificates. You can disable it when connecting to servers that use a self-signed certificate.",
type: 'boolean',
defaults: DEFAULTS.checkCertificate,
default: true,
},
bind: {
description: 'Credentials to use before looking for the user record.',
@@ -81,11 +76,6 @@ For Microsoft Active Directory, it can also be \`<user>@<domain>\`.
description: `
Filter used to find the user.
For LDAP if you want to filter for a special group you can try
something like:
- \`(&(uid={{name}})(memberOf=<group DN>))\`
For Microsoft Active Directory, you can try one of the following filters:
- \`(cn={{name}})\`
@@ -93,12 +83,13 @@ For Microsoft Active Directory, you can try one of the following filters:
- \`(sAMAccountName={{name}}@<domain>)\` (replace \`<domain>\` by your own domain)
- \`(userPrincipalName={{name}})\`
Or something like this if you also want to filter by group:
For LDAP if you want to filter for a special group you can try
something like:
- \`(&(sAMAccountName={{name}})(memberOf=<group DN>))\`
- \`(&(uid={{name}})(memberOf=<group DN>))\`
`.trim(),
type: 'string',
default: DEFAULTS.filter,
default: '(uid={{name}})',
},
},
required: ['uri', 'base'],
@@ -125,7 +116,7 @@ class AuthLdap {
constructor(xo) {
this._xo = xo
this._authenticate = this._authenticate.bind(this)
this._authenticate = bind(this._authenticate, this)
}
async configure(conf) {
@@ -136,11 +127,7 @@ class AuthLdap {
})
{
const {
bind,
checkCertificate = DEFAULTS.checkCertificate,
certificateAuthorities,
} = conf
const { bind, checkCertificate = true, certificateAuthorities } = conf
if (bind) {
clientOpts.bindDN = bind.dn
@@ -160,7 +147,7 @@ class AuthLdap {
const {
bind: credentials,
base: searchBase,
filter: searchFilter = DEFAULTS.filter,
filter: searchFilter = '(uid={{name}})',
} = conf
this._credentials = credentials

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { bind } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { readFile, writeFile } from 'fs'
@@ -16,7 +17,7 @@ const CACHE_FILE = './ldap.cache.conf'
execPromise(async args => {
const config = await promptSchema(
configurationSchema,
await fromCallback(readFile, CACHE_FILE, 'utf-8').then(
await fromCallback(cb => readFile(CACHE_FILE, 'utf-8', cb)).then(
JSON.parse,
() => ({})
)
@@ -43,6 +44,6 @@ execPromise(async args => {
}),
password: await password('Password'),
},
console.log.bind(console)
bind(console.log, console)
)
})

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.7.0",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [
@@ -40,7 +40,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-preset-env": "^1.6.1",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -2,10 +2,6 @@ import { Strategy } from 'passport-saml'
// ===================================================================
const DEFAULTS = {
disableRequestedAuthnContext: false,
}
export const configurationSchema = {
description:
'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
@@ -34,11 +30,6 @@ You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddr
`,
type: 'string',
},
disableRequestedAuthnContext: {
title: "Don't request an authentication context",
description: 'This is known to help when using Active Directory',
default: DEFAULTS.disableRequestedAuthnContext,
},
},
required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
}
@@ -55,7 +46,6 @@ class AuthSamlXoPlugin {
configure({ usernameField, ...conf }) {
this._usernameField = usernameField
this._conf = {
...DEFAULTS,
...conf,
// must match the callback URL

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.4",
"version": "0.16.2",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -36,7 +36,6 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/log": "^0.2.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
@@ -49,7 +48,7 @@
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -2,7 +2,6 @@ import createLogger from '@xen-orchestra/log'
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, groupBy, startCase } from 'lodash'
import { get } from '@xen-orchestra/defined'
import pkg from '../package'
const logger = createLogger('xo:xo-server-backup-reports')
@@ -187,7 +186,7 @@ const MARKDOWN_BY_TYPE = {
}
const getMarkdown = (task, props) =>
MARKDOWN_BY_TYPE[task.data?.type]?.(task, props)
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
const toMarkdown = parts => {
const lines = []
@@ -318,7 +317,6 @@ class BackupReportsXoPlugin {
const taskMarkdown = await getMarkdown(task, {
formatDate,
jobName: log.jobName,
xo,
})
if (taskMarkdown === undefined) {
continue
@@ -356,7 +354,7 @@ class BackupReportsXoPlugin {
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
@@ -366,10 +364,9 @@ class BackupReportsXoPlugin {
})
}
async _ngVmHandler(log, { name: jobName, settings }, schedule, force) {
async _ngVmHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const mailReceivers = get(() => settings[''].reportRecipients)
const { reportWhen, mode } = log.data || {}
const formatDate = createDateFormatter(schedule?.timezone)
@@ -392,9 +389,8 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
mailReceivers,
markdown: toMarkdown(markdown),
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
@@ -646,12 +642,11 @@ class BackupReportsXoPlugin {
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
mailReceivers,
markdown: toMarkdown(markdown),
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
success: log.status === 'success',
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
@@ -661,18 +656,12 @@ class BackupReportsXoPlugin {
})
}
_sendReport({
mailReceivers = this._mailsReceivers,
markdown,
nagiosMarkdown,
subject,
success,
}) {
_sendReport({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: mailReceivers,
to: this._mailsReceivers,
subject,
markdown,
}),
@@ -687,14 +676,9 @@ class BackupReportsXoPlugin {
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: success ? 0 : 2,
status: nagiosStatus,
message: nagiosMarkdown,
}),
xo.sendIcinga2Status !== undefined &&
xo.sendIcinga2Status({
status: success ? 'OK' : 'CRITICAL',
message: markdown,
}),
])
}
@@ -724,7 +708,7 @@ class BackupReportsXoPlugin {
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
success: false,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
})
}
@@ -920,7 +904,7 @@ class BackupReportsXoPlugin {
? ICON_FAILURE
: ICON_SKIPPED
}`,
success: globalSuccess,
nagiosStatus: globalSuccess ? 0 : 2,
nagiosMarkdown: globalSuccess
? `[Xen Orchestra] [Success] Backup report for ${tag}`
: `[Xen Orchestra] [${

View File

@@ -1,4 +1,8 @@
# xo-server-web-hooks [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
# xo-server-cloud [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
## Install
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Usage
@@ -26,7 +30,7 @@ the web interface, see [the plugin documentation](https://xen-orchestra.com/docs
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
Contributions are *very* welcomed, either on the documentation or on
the code.
You may:

View File

@@ -1,21 +1,20 @@
{
"name": "xo-server-web-hooks",
"version": "0.1.0",
"license": "AGPL-3.0",
"name": "xo-server-cloud",
"version": "0.3.0",
"license": "ISC",
"description": "",
"keywords": [
"hooks",
"cloud",
"orchestra",
"plugin",
"web",
"xen",
"xen-orchestra",
"xo-server"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-web-hooks",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-cloud",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-web-hooks",
"directory": "packages/xo-server-cloud",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -30,25 +29,24 @@
"dist/"
],
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"http-request-plus": "^0.8.0",
"lodash": "^4.17.15"
"jsonrpc-websocket-client": "^0.5.0"
},
"devDependencies": {
"@babel/cli": "^7.7.0",
"@babel/core": "^7.7.2",
"@babel/plugin-proposal-optional-chaining": "^7.6.0",
"@babel/preset-env": "^7.7.1",
"cross-env": "^6.0.3",
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},

View File

@@ -0,0 +1,208 @@
import Client, { createBackoff } from 'jsonrpc-websocket-client'
import hrp from 'http-request-plus'
const WS_URL = 'ws://localhost:9001'
const HTTP_URL = 'http://localhost:9002'
// ===================================================================
class XoServerCloud {
constructor({ xo }) {
this._xo = xo
// Defined in configure().
this._conf = null
this._key = null
}
configure(configuration) {
this._conf = configuration
}
async load() {
const getResourceCatalog = this._getCatalog.bind(this)
getResourceCatalog.description =
"Get the list of user's available resources"
getResourceCatalog.permission = 'admin'
getResourceCatalog.params = {
filters: { type: 'object', optional: true },
}
const registerResource = ({ namespace }) =>
this._registerResource(namespace)
registerResource.description = 'Register a resource via cloud plugin'
registerResource.params = {
namespace: {
type: 'string',
},
}
registerResource.permission = 'admin'
const downloadAndInstallResource = this._downloadAndInstallResource.bind(
this
)
downloadAndInstallResource.description =
'Download and install a resource via cloud plugin'
downloadAndInstallResource.params = {
id: { type: 'string' },
namespace: { type: 'string' },
version: { type: 'string' },
sr: { type: 'string' },
}
downloadAndInstallResource.resolve = {
sr: ['sr', 'SR', 'administrate'],
}
downloadAndInstallResource.permission = 'admin'
this._unsetApiMethods = this._xo.addApiMethods({
cloud: {
downloadAndInstallResource,
getResourceCatalog,
registerResource,
},
})
this._unsetRequestResource = this._xo.defineProperty(
'requestResource',
this._requestResource,
this
)
const updater = (this._updater = new Client(WS_URL))
const connect = () =>
updater.open(createBackoff()).catch(error => {
console.error('xo-server-cloud: fail to connect to updater', error)
return connect()
})
updater.on('closed', connect).on('scheduledAttempt', ({ delay }) => {
console.warn('xo-server-cloud: next attempt in %s ms', delay)
})
connect()
}
unload() {
this._unsetApiMethods()
this._unsetRequestResource()
}
// ----------------------------------------------------------------
async _getCatalog({ filters } = {}) {
const catalog = await this._updater.call('getResourceCatalog', { filters })
if (!catalog) {
throw new Error('cannot get catalog')
}
return catalog
}
// ----------------------------------------------------------------
async _getNamespaces() {
const catalog = await this._getCatalog()
if (!catalog._namespaces) {
throw new Error('cannot get namespaces')
}
return catalog._namespaces
}
// ----------------------------------------------------------------
async _downloadAndInstallResource({ id, namespace, sr, version }) {
const stream = await this._requestResource({
hub: true,
id,
namespace,
version,
})
const vm = await this._xo.getXapi(sr.$poolId).importVm(stream, {
srId: sr.id,
type: 'xva',
})
await vm.update_other_config({
'xo:resource:namespace': namespace,
'xo:resource:xva:version': version,
'xo:resource:xva:id': id,
})
}
// ----------------------------------------------------------------
async _registerResource(namespace) {
const _namespace = (await this._getNamespaces())[namespace]
if (_namespace === undefined) {
throw new Error(`${namespace} is not available`)
}
if (_namespace.registered || _namespace.pending) {
throw new Error(`already registered for ${namespace}`)
}
return this._updater.call('registerResource', { namespace })
}
// ----------------------------------------------------------------
async _getNamespaceCatalog({ hub, namespace }) {
const namespaceCatalog = (await this._getCatalog({ filters: { hub } }))[
namespace
]
if (!namespaceCatalog) {
throw new Error(`cannot get catalog: ${namespace} not registered`)
}
return namespaceCatalog
}
// ----------------------------------------------------------------
async _requestResource({ hub = false, id, namespace, version }) {
const _namespace = (await this._getNamespaces())[namespace]
if (!hub && (!_namespace || !_namespace.registered)) {
throw new Error(`cannot get resource: ${namespace} not registered`)
}
const { _token: token } = await this._getNamespaceCatalog({
hub,
namespace,
})
// 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
if (token === undefined) {
throw new Error(`${namespace} namespace token is undefined`)
}
const downloadToken = await this._updater.call('getResourceDownloadToken', {
token,
id,
version,
})
if (!downloadToken) {
throw new Error('cannot get download token')
}
const response = await hrp(HTTP_URL, {
headers: {
Authorization: `Bearer ${downloadToken}`,
},
})
// currently needed for XenApi#putResource()
response.length = response.headers['content-length']
return response
}
}
export default opts => new XoServerCloud(opts)

View File

@@ -31,7 +31,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"lodash": "^4.16.2"
},
"devDependencies": {

View File

@@ -21,7 +21,7 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/cron": "^1.0.4",
"d3-time-format": "^2.1.1",
"json5": "^2.0.1",
"lodash": "^4.17.4"
@@ -32,7 +32,7 @@
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

View File

@@ -1,6 +1,6 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const COMPARATOR_FN = {
@@ -483,7 +483,7 @@ ${monitorBodies.join('\n')}`
result.rrd = await this.getRrd(result.object, observationPeriod)
if (result.rrd !== null) {
const data = parseData(result.rrd, result.object.uuid)
Object.assign(result, {
assign(result, {
data,
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
@@ -496,7 +496,7 @@ ${monitorBodies.join('\n')}`
definition.alarmTriggerLevel
)
const data = getter(result.object)
Object.assign(result, {
assign(result, {
value: data.getDisplayableValue(),
shouldAlarm: data.shouldAlarm(),
})

View File

@@ -15,9 +15,9 @@
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.3.1",
"version": "0.3.0",
"engines": {
"node": ">=8.10"
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
@@ -25,12 +25,12 @@
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^6.0.3"
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.101",
"node-openssl-cert": "^0.0.97",
"promise-toolbox": "^0.14.0",
"uuid": "^3.3.2"
},

File diff suppressed because it is too large Load Diff

View File

@@ -28,7 +28,8 @@ export class OvsdbClient {
Attributes on created OVS ports (corresponds to a XAPI `PIF` or `VIF`):
- `other_config`:
- `xo:sdn-controller:private-network-uuid`: UUID of the private network
- `xo:sdn-controller:cross-pool` : UUID of the remote network connected by the tunnel
- `xo:sdn-controller:private-pool-wide`: `true` if created (and managed) by a SDN Controller
Attributes on created OVS interfaces:
- `options`:
@@ -66,49 +67,55 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
network,
networkUuid,
networkName,
remoteAddress,
encapsulation,
key,
password,
privateNetworkUuid
remoteNetwork
) {
if (
this._adding.find(
elem => elem.id === network.uuid && elem.addr === remoteAddress
elem => elem.id === networkUuid && elem.addr === remoteAddress
) !== undefined
) {
return
}
const adding = { id: network.uuid, addr: remoteAddress }
const adding = { id: networkUuid, addr: remoteAddress }
this._adding.push(adding)
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridge,
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
return bridge.name
return bridgeName
}
const index = ++this._numberOfPortAndInterface
const interfaceName = bridge.name + '_iface' + index
const portName = bridge.name + '_port' + index
const interfaceName = bridgeName + '_iface' + index
const portName = bridgeName + '_port' + index
// Add interface and port to the bridge
const options = { remote_ip: remoteAddress, key: key }
@@ -132,9 +139,11 @@ export class OvsdbClient {
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: toMap({
'xo:sdn-controller:private-network-uuid': privateNetworkUuid,
}),
other_config: toMap(
remoteNetwork !== undefined
? { 'xo:sdn-controller:cross-pool': remoteNetwork }
: { 'xo:sdn-controller:private-pool-wide': 'true' }
),
},
'uuid-name': 'new_port',
}
@@ -142,7 +151,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
@@ -154,7 +163,7 @@ export class OvsdbClient {
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
this._adding = this._adding.filter(
elem => elem.id !== network.uuid || elem.addr !== remoteAddress
elem => elem.id !== networkUuid || elem.addr !== remoteAddress
)
if (jsonObjects === undefined) {
socket.destroy()
@@ -180,8 +189,8 @@ export class OvsdbClient {
details,
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
@@ -191,24 +200,33 @@ export class OvsdbClient {
log.debug('Port and interface added to bridge', {
port: portName,
interface: interfaceName,
bridge: bridge.name,
network: network.name_label,
bridge: bridgeName,
network: networkName,
host: this.host.name_label,
})
socket.destroy()
return bridge.name
return bridgeName
}
async resetForNetwork(network, privateNetworkUuid) {
async resetForNetwork(
networkUuid,
networkName,
crossPoolOnly,
remoteNetwork
) {
const socket = await this._connect()
const bridge = await this._getBridgeForNetwork(network, socket)
if (bridge.uuid === undefined) {
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid === undefined) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridge, socket)
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
socket.destroy()
return
@@ -232,14 +250,15 @@ export class OvsdbClient {
// 2019-09-03
// Compatibility code, to be removed in 1 year.
const oldShouldDelete =
config[0] === 'private_pool_wide' ||
config[0] === 'cross_pool' ||
config[0] === 'xo:sdn-controller:private-pool-wide' ||
config[0] === 'xo:sdn-controller:cross-pool'
(config[0] === 'private_pool_wide' && !crossPoolOnly) ||
(config[0] === 'cross_pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
const shouldDelete =
config[0] === 'xo:sdn-controller:private-network-uuid' &&
config[1] === privateNetworkUuid
(config[0] === 'xo:sdn-controller:private-pool-wide' &&
!crossPoolOnly) ||
(config[0] === 'xo:sdn-controller:cross-pool' &&
(remoteNetwork === undefined || remoteNetwork === config[1]))
if (shouldDelete || oldShouldDelete) {
portsToDelete.push(['uuid', portUuid])
@@ -256,7 +275,7 @@ export class OvsdbClient {
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridge.uuid]]],
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
@@ -269,7 +288,7 @@ export class OvsdbClient {
if (jsonObjects[0].error != null) {
log.error('Error while deleting ports from bridge', {
error: jsonObjects[0].error,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -278,7 +297,7 @@ export class OvsdbClient {
log.debug('Ports deleted from bridge', {
nPorts: jsonObjects[0].result[0].count,
bridge: bridge.name,
bridge: bridgeName,
host: this.host.name_label,
})
socket.destroy()
@@ -316,9 +335,9 @@ export class OvsdbClient {
// ---------------------------------------------------------------------------
async _getBridgeForNetwork(network, socket) {
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
['external_ids', 'includes', toMap({ 'xs-network-uuids': network.uuid })],
['external_ids', 'includes', toMap({ 'xs-network-uuids': networkUuid })],
]
const selectResult = await this._select(
'Bridge',
@@ -328,17 +347,25 @@ export class OvsdbClient {
)
if (selectResult === undefined) {
log.error('No bridge found for network', {
network: network.name_label,
network: networkName,
host: this.host.name_label,
})
return {}
return []
}
return { uuid: selectResult._uuid[1], name: selectResult.name }
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(bridge, remoteAddress, socket) {
const ports = await this._getBridgePorts(bridge, socket)
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports === undefined) {
return false
}
@@ -366,8 +393,8 @@ export class OvsdbClient {
return false
}
async _getBridgePorts(bridge, socket) {
const where = [['_uuid', '==', ['uuid', bridge.uuid]]]
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult === undefined) {
return

View File

@@ -1,202 +0,0 @@
import createLogger from '@xen-orchestra/log'
import { filter, find, forOwn, map, sample } from 'lodash'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller:private-network')
// =============================================================================
const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?!'
const createPassword = () =>
Array.from({ length: 16 }, _ => sample(CHARS)).join('')
// =============================================================================
export class PrivateNetwork {
constructor(controller, uuid) {
this.controller = controller
this.uuid = uuid
this.networks = {}
}
// ---------------------------------------------------------------------------
async addHost(host) {
if (host.$ref === this.center?.$ref) {
// Nothing to do
return
}
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
log.error('No OVSDB client found', {
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
const centerClient = this.controller.ovsdbClients[this.center.$ref]
if (centerClient === undefined) {
log.error('No OVSDB client found for star-center', {
privateNetwork: this.uuid,
host: this.center.name_label,
pool: this.center.$pool.name_label,
})
return
}
const network = this.networks[host.$pool.uuid]
const centerNetwork = this.networks[this.center.$pool.uuid]
const otherConfig = network.other_config
const encapsulation =
otherConfig['xo:sdn-controller:encapsulation'] ?? 'gre'
const vni = otherConfig['xo:sdn-controller:vni'] ?? '0'
const password =
otherConfig['xo:sdn-controller:encrypted'] === 'true'
? createPassword()
: undefined
let bridgeName
try {
;[bridgeName] = await Promise.all([
hostClient.addInterfaceAndPort(
network,
centerClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
centerClient.addInterfaceAndPort(
centerNetwork,
hostClient.host.address,
encapsulation,
vni,
password,
this.uuid
),
])
} catch (error) {
log.error('Error while connecting host to private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return
}
log.info('Host added', {
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: host.$pool.name_label,
})
return bridgeName
}
addNetwork(network) {
this.networks[network.$pool.uuid] = network
log.info('Adding network', {
privateNetwork: this.uuid,
network: network.name_label,
pool: network.$pool.name_label,
})
if (this.center === undefined) {
return this.electNewCenter()
}
const hosts = filter(network.$pool.$xapi.objects.all, { $type: 'host' })
return Promise.all(
map(hosts, async host => {
const hostClient = this.controller.ovsdbClients[host.$ref]
const network = this.networks[host.$pool.uuid]
await hostClient.resetForNetwork(network, this.uuid)
await this.addHost(host)
})
)
}
async electNewCenter() {
delete this.center
// TODO: make it random
const hosts = this._getHosts()
for (const host of hosts) {
const pif = find(host.$PIFs, {
network: this.networks[host.$pool.uuid].$ref,
})
if (pif?.currently_attached && host.$metrics.live) {
this.center = host
break
}
}
if (this.center === undefined) {
log.error('No available host to elect new star-center', {
privateNetwork: this.uuid,
})
return
}
await this._reset()
// Recreate star topology
await Promise.all(map(hosts, host => this.addHost(host)))
log.info('New star-center elected', {
center: this.center.name_label,
privateNetwork: this.uuid,
})
}
// ---------------------------------------------------------------------------
getPools() {
const pools = []
forOwn(this.networks, network => {
pools.push(network.$pool)
})
return pools
}
// ---------------------------------------------------------------------------
_reset() {
return Promise.all(
map(this._getHosts(), async host => {
// Clean old ports and interfaces
const hostClient = this.controller.ovsdbClients[host.$ref]
if (hostClient === undefined) {
return
}
const network = this.networks[host.$pool.uuid]
try {
await hostClient.resetForNetwork(network, this.uuid)
} catch (error) {
log.error('Error while resetting private network', {
error,
privateNetwork: this.uuid,
network: network.name_label,
host: host.name_label,
pool: network.$pool.name_label,
})
}
})
)
}
// ---------------------------------------------------------------------------
_getHosts() {
const hosts = []
forOwn(this.networks, network => {
hosts.push(...filter(network.$pool.$xapi.objects.all, { $type: 'host' }))
})
return hosts
}
}

View File

@@ -8,8 +8,5 @@
"directory": "packages/xo-server-test-plugin",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"engines": {
"node": "*"
}
}

View File

@@ -36,7 +36,7 @@
"golike-defer": "^0.4.1",
"jest": "^24.8.0",
"lodash": "^4.17.11",
"promise-toolbox": "^0.14.0",
"promise-toolbox": "^0.13.0",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-lib": "^0.9.0"

View File

@@ -14,7 +14,6 @@
[vms]
default = ''
withOsAndXenTools = ''
# vmToBackup = ''
[templates]

View File

@@ -154,19 +154,6 @@ class XoConnection extends Xo {
})
}
async startTempVm(id, params, withXenTools = false) {
await this.call('vm.start', { id, ...params })
this._tempResourceDisposers.push('vm.stop', { id, force: true })
return this.waitObjectState(id, vm => {
if (
vm.power_state !== 'Running' ||
(withXenTools && vm.xenTools === false)
) {
throw new Error('retry')
}
})
}
async createTempRemote(params) {
const remote = await this.call('remote.create', params)
this._tempResourceDisposers.push('remote.delete', { id: remote.id })

View File

@@ -55,68 +55,6 @@ Object {
}
`;
exports[`backupNg create and execute backup with enabled offline backup 1`] = `
Object {
"data": Object {
"id": Any<String>,
"type": "VM",
},
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 2`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 3`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 4`] = `
Object {
"data": Any<Object>,
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg create and execute backup with enabled offline backup 5`] = `
Object {
"end": Any<Number>,
"id": Any<String>,
"message": Any<String>,
"result": Object {
"size": Any<Number>,
},
"start": Any<Number>,
"status": "success",
}
`;
exports[`backupNg execute three times a delta backup with 2 remotes, 2 as retention and 2 as fullInterval 1`] = `
Object {
"data": Object {

View File

@@ -584,110 +584,4 @@ describe('backupNg', () => {
})
})
})
test('create and execute backup with enabled offline backup', async () => {
const vm = xo.objects.all[config.vms.withOsAndXenTools]
if (vm.power_state !== 'Running') {
await xo.startTempVm(vm.id, { force: true }, true)
}
const scheduleTempId = randomId()
const srId = config.srs.default
const { id: remoteId } = await xo.createTempRemote(config.remotes.default)
const backupInput = {
mode: 'full',
remotes: {
id: remoteId,
},
schedules: {
[scheduleTempId]: getDefaultSchedule(),
},
settings: {
'': {
offlineBackup: true,
},
[scheduleTempId]: {
copyRetention: 1,
exportRetention: 1,
},
},
srs: {
id: srId,
},
vms: {
id: vm.id,
},
}
const backup = await xo.createTempBackupNgJob(backupInput)
expect(backup.settings[''].offlineBackup).toBe(true)
const schedule = await xo.getSchedule({ jobId: backup.id })
await Promise.all([
xo.runBackupJob(backup.id, schedule.id, { remotes: [remoteId] }),
xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Halted') {
throw new Error('retry')
}
}),
])
await xo.waitObjectState(vm.id, vm => {
if (vm.power_state !== 'Running') {
throw new Error('retry')
}
})
const backupLogs = await xo.getBackupLogs({
jobId: backup.id,
scheduleId: schedule.id,
})
expect(backupLogs.length).toBe(1)
const { tasks, ...log } = backupLogs[0]
validateRootTask(log, {
data: {
mode: backupInput.mode,
reportWhen: backupInput.settings[''].reportWhen,
},
jobId: backup.id,
jobName: backupInput.name,
scheduleId: schedule.id,
status: 'success',
})
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...vmTask }) => {
validateVmTask(vmTask, vm.id, { status: 'success' })
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(({ tasks, ...subTask }) => {
expect(subTask.message).not.toBe('snapshot')
if (subTask.message === 'export') {
validateExportTask(
subTask,
subTask.data.type === 'remote' ? remoteId : srId,
{
data: expect.any(Object),
status: 'success',
}
)
expect(Array.isArray(tasks)).toBe(true)
tasks.forEach(operationTask => {
if (
operationTask.message === 'transfer' ||
operationTask.message === 'merge'
) {
validateOperationTask(operationTask, {
result: { size: expect.any(Number) },
status: 'success',
})
}
})
}
})
})
}, 200e3)
})

View File

@@ -6,7 +6,7 @@ import expect from 'must'
// ===================================================================
import { getConfig, getMainConnection, getSrId, waitObjectState } from './util'
import { map } from 'lodash'
import { map, assign } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -27,7 +27,7 @@ describe('disk', () => {
const config = await getConfig()
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -1,6 +1,6 @@
/* eslint-env jest */
import { find, map } from 'lodash'
import { assign, find, map } from 'lodash'
import { config, rejectionOf, xo } from './util'
@@ -60,16 +60,14 @@ describe('server', () => {
autoConnect: false,
})
expect(
(
await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)
).message
(await rejectionOf(
addServer({
host: 'xen1.example.org',
username: 'root',
password: 'password',
autoConnect: false,
})
)).message
).toBe('unknown error from the peer')
})
@@ -153,7 +151,7 @@ describe('server', () => {
it('connects to a Xen server', async () => {
const serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
@@ -186,7 +184,7 @@ describe('server', () => {
let serverId
beforeEach(async () => {
serverId = await addServer(
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', {
id: serverId,

View File

@@ -12,7 +12,7 @@ import {
getOneHost,
waitObjectState,
} from './util'
import { map } from 'lodash'
import { assign, map } from 'lodash'
import eventToPromise from 'event-to-promise'
// ===================================================================
@@ -33,7 +33,7 @@ describe('vbd', () => {
serverId = await xo.call(
'server.add',
Object.assign({ autoConnect: false }, config.xenServer1)
assign({ autoConnect: false }, config.xenServer1)
)
await xo.call('server.connect', { id: serverId })
await eventToPromise(xo.objects, 'finish')

View File

@@ -60,15 +60,13 @@ describe('cd', () => {
await getOrWaitCdVbdPosition(vmId)
expect(
(
await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.insertCd', {
id: vmId,
cd_id: config.ubuntuIsoId,
force: false,
})
)).message
).toBe('unknown error from the peer')
})

View File

@@ -126,14 +126,12 @@ describe('the VM life cyle', () => {
})
expect(
(
await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.restart', {
id: hvmWithoutToolsId,
force: false,
})
)).message
).toBe('VM lacks feature shutdown')
})
@@ -198,14 +196,12 @@ describe('the VM life cyle', () => {
})
expect(
(
await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)
).message
(await rejectionOf(
xo.call('vm.stop', {
id: hvmWithoutToolsId,
force: false,
})
)).message
).toBe('clean shutdown requires PV drivers')
})

View File

@@ -34,14 +34,14 @@
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.14.0"
"promise-toolbox": "^0.13.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^6.0.3",
"cross-env": "^5.1.3",
"rimraf": "^3.0.0"
},
"scripts": {

Some files were not shown because too many files have changed in this diff Show More