Compare commits


1 Commit

Author: Julien Fontanet
SHA1: 47732f7f5a
Message: fix(xen-api): remove Promise.race
Date: 2019-04-04 11:57:43 +02:00
205 changed files with 4710 additions and 8386 deletions

View File

@@ -1,7 +1,5 @@
module.exports = {
extends: [
'plugin:eslint-comments/recommended',
'standard',
'standard-jsx',
'prettier',
@@ -21,7 +19,7 @@ module.exports = {
overrides: [
{
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
files: ['packages/*cli*/**/*.js', '*-cli.js'],
rules: {
'no-console': 'off',
},
@@ -35,9 +33,6 @@ module.exports = {
},
},
rules: {
// disabled because XAPI objects are using camel case
camelcase: ['off'],
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

View File

@@ -46,7 +46,6 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -16,9 +16,6 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.26.0"
},
"scripts": {
"postversion": "npm publish"
"xen-api": "^0.25.0"
}
}

View File

@@ -55,7 +55,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -42,7 +42,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.10.0",
"version": "0.8.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -21,19 +21,18 @@
"node": ">=6"
},
"dependencies": {
"@marsaud/smb2": "^0.14.0",
"@marsaud/smb2": "^0.13.0",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^1.0.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.5.0"
},
"devDependencies": {
@@ -41,13 +40,12 @@
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"dotenv": "^7.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
@@ -57,7 +55,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -4,7 +4,6 @@
import getStream from 'get-stream'
import asyncMap from '@xen-orchestra/async-map'
import limit from 'limit-concurrency-decorator'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
@@ -32,7 +31,6 @@ const computeRate = (hrtime: number[], size: number) => {
}
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
@@ -85,25 +83,6 @@ export default class RemoteHandlerAbstract {
}
}
;({ timeout: this._timeout = DEFAULT_TIMEOUT } = options)
const sharedLimit = limit(
options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS
)
this.closeFile = sharedLimit(this.closeFile)
this.getInfo = sharedLimit(this.getInfo)
this.getSize = sharedLimit(this.getSize)
this.list = sharedLimit(this.list)
this.mkdir = sharedLimit(this.mkdir)
this.openFile = sharedLimit(this.openFile)
this.outputFile = sharedLimit(this.outputFile)
this.read = sharedLimit(this.read)
this.readFile = sharedLimit(this.readFile)
this.rename = sharedLimit(this.rename)
this.rmdir = sharedLimit(this.rmdir)
this.truncate = sharedLimit(this.truncate)
this.unlink = sharedLimit(this.unlink)
this.write = sharedLimit(this.write)
this.writeFile = sharedLimit(this.writeFile)
}
// Public members
@@ -421,10 +400,6 @@ export default class RemoteHandlerAbstract {
}
}
async truncate(file: string, len: number): Promise<void> {
await this._truncate(file, len)
}
async unlink(file: string, { checksum = true }: Object = {}): Promise<void> {
file = normalizePath(file)
@@ -435,18 +410,6 @@ export default class RemoteHandlerAbstract {
await this._unlink(file).catch(ignoreEnoent)
}
async write(
file: File,
buffer: Buffer,
position: number
): Promise<{| bytesWritten: number, buffer: Buffer |}> {
await this._write(
typeof file === 'string' ? normalizePath(file) : file,
buffer,
position
)
}
async writeFile(
file: string,
data: Data,
@@ -583,28 +546,6 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _write(file: File, buffer: Buffer, position: number): Promise<void> {
const isPath = typeof file === 'string'
if (isPath) {
file = await this.openFile(file, 'r+')
}
try {
return await this._writeFd(file, buffer, position)
} finally {
if (isPath) {
await this.closeFile(file)
}
}
}
async _writeFd(
fd: FileDescriptor,
buffer: Buffer,
position: number
): Promise<void> {
throw new Error('Not implemented')
}
async _writeFile(
file: string,
data: Data,

View File

@@ -3,9 +3,9 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { random } from 'lodash'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -310,70 +310,5 @@ handlers.forEach(url => {
await handler.unlink('file')
})
})
describe('#write()', () => {
beforeEach(() => handler.outputFile('file', TEST_DATA))
const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)
forOwn(
{
'dont increase file size': (() => {
const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)
const expected = Buffer.from(TEST_DATA)
PATCH_DATA.copy(expected, offset)
return { offset, expected }
})(),
'increase file size': (() => {
const offset = random(
TEST_DATA_LEN - PATCH_DATA_LEN + 1,
TEST_DATA_LEN
)
const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
TEST_DATA.copy(expected)
PATCH_DATA.copy(expected, offset)
return { offset, expected }
})(),
},
({ offset, expected }, title) => {
describe(title, () => {
testWithFileDescriptor('file', 'r+', async ({ file }) => {
await handler.write(file, PATCH_DATA, offset)
await expect(await handler.readFile('file')).toEqual(expected)
})
})
}
)
})
describe('#truncate()', () => {
forOwn(
{
'shrinks file': (() => {
const length = random(0, TEST_DATA_LEN)
const expected = TEST_DATA.slice(0, length)
return { length, expected }
})(),
'grows file': (() => {
const length = random(TEST_DATA_LEN, TEST_DATA_LEN * 2)
const expected = Buffer.alloc(length)
TEST_DATA.copy(expected)
return { length, expected }
})(),
},
({ length, expected }, title) => {
it(title, async () => {
await handler.outputFile('file', TEST_DATA)
await handler.truncate('file', length)
await expect(await handler.readFile('file')).toEqual(expected)
})
}
)
})
})
})

View File

@@ -106,18 +106,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
await fs.access(path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return fs.truncate(this._getFilePath(file), len)
}
async _unlink(file) {
return fs.unlink(this._getFilePath(file))
}
_writeFd(file, buffer, position) {
return fs.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
}

View File

@@ -155,20 +155,10 @@ export default class SmbHandler extends RemoteHandlerAbstract {
return this.list('.')
}
_truncate(file, len) {
return this._client
.truncate(this._getFilePath(file), len)
.catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client
.writeFile(this._getFilePath(file), data, options)

View File

@@ -24,19 +24,6 @@ log.info('this information is relevant to the user')
log.warn('something went wrong but did not prevent current action')
log.error('something went wrong')
log.fatal('service/app is going down')
// you can add contextual info
log.debug('new API request', {
method: 'foo',
params: [ 'bar', 'baz' ],
user: 'qux'
})
// by convention, errors go into the `error` field
log.error('could not join server', {
error,
server: 'example.org',
})
```
Then, at the application level, configure how the logs are handled:
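The README's configuration example is cut off by the hunk above. As a hedged sketch only, application-level setup could look roughly like the following; the `configure` helper, its signature and the transport module paths are assumptions inferred from the transport files touched elsewhere in this diff (`transports/console`, `transports/syslog`), not something this hunk confirms:

```js
// Hypothetical sketch: module paths, configure() and its option shapes are
// assumptions, not the package's documented API.
import { configure } from '@xen-orchestra/log/configure'
import transportConsole from '@xen-orchestra/log/transports/console'
import transportSyslog from '@xen-orchestra/log/transports/syslog'

configure([
  // assumed shape: send warnings and above to a syslog server (URL is illustrative)
  { level: 'warn', transport: transportSyslog('udp://syslog.example.org') },
  // assumed shape: everything else goes to the console
  transportConsole(),
])
```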

View File

@@ -31,7 +31,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -48,7 +48,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,12 +1,10 @@
import LEVELS, { NAMES } from '../levels'
// Bind console methods (necessary for browsers)
/* eslint-disable no-console */
const debugConsole = console.log.bind(console)
const infoConsole = console.info.bind(console)
const warnConsole = console.warn.bind(console)
const errorConsole = console.error.bind(console)
/* eslint-enable no-console */
const { ERROR, INFO, WARN } = LEVELS

View File

@@ -1,6 +1,7 @@
import fromCallback from 'promise-toolbox/fromCallback'
import splitHost from 'split-host'
import { createClient, Facility, Severity, Transport } from 'syslog-client'
import splitHost from 'split-host' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import startsWith from 'lodash/startsWith'
import { createClient, Facility, Severity, Transport } from 'syslog-client' // eslint-disable-line node/no-extraneous-import node/no-missing-import
import LEVELS from '../levels'
@@ -18,10 +19,10 @@ const facility = Facility.User
export default target => {
const opts = {}
if (target !== undefined) {
if (target.startsWith('tcp://')) {
if (startsWith(target, 'tcp://')) {
target = target.slice(6)
opts.transport = Transport.Tcp
} else if (target.startsWith('udp://')) {
} else if (startsWith(target, 'udp://')) {
target = target.slice(6)
opts.transport = Transport.Udp
}

View File

@@ -45,7 +45,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,139 +1,5 @@
# ChangeLog
## **next**
### Enhancements
### Bug fixes
### Released packages
## **5.36.0** (2019-06-27)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Highlights
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
- [Host/advanced] Fix host CPU hyperthreading detection [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4285](https://github.com/vatesfr/xen-orchestra/pull/4285))
- [VM/Advanced] Ability to use UEFI instead of BIOS [#4264](https://github.com/vatesfr/xen-orchestra/issues/4264) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4268))
- [Backup-ng/restore] Display size for full VM backup [#4009](https://github.com/vatesfr/xen-orchestra/issues/4009) (PR [#4245](https://github.com/vatesfr/xen-orchestra/pull/4245))
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
- [Host/storages, SR/hosts] Display PBD details [#4264](https://github.com/vatesfr/xen-orchestra/issues/4161) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4284))
- [auth-saml] Improve compatibility with Microsoft Azure Active Directory (PR [#4294](https://github.com/vatesfr/xen-orchestra/pull/4294))
### Enhancements
- [Host] Display warning when "Citrix Hypervisor" license has restrictions [#4251](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4279))
- [VM/Backup] Create backup bulk action [#2573](https://github.com/vatesfr/xen-orchestra/issues/2573) (PR [#4257](https://github.com/vatesfr/xen-orchestra/pull/4257))
- [Host] Display warning when host's time differs too much from XOA's time [#4113](https://github.com/vatesfr/xen-orchestra/issues/4113) (PR [#4173](https://github.com/vatesfr/xen-orchestra/pull/4173))
- [VM/network] Display and set bandwidth rate-limit of a VIF [#4215](https://github.com/vatesfr/xen-orchestra/issues/4215) (PR [#4293](https://github.com/vatesfr/xen-orchestra/pull/4293))
- [SDN Controller] New plugin which enables creating pool-wide private networks [xcp-ng/xcp#175](https://github.com/xcp-ng/xcp/issues/175) (PR [#4269](https://github.com/vatesfr/xen-orchestra/pull/4269))
### Bug fixes
- [XOA] Don't require editing the _email_ field in case of re-registration (PR [#4259](https://github.com/vatesfr/xen-orchestra/pull/4259))
- [Metadata backup] Missing XAPIs should trigger a failure job [#4281](https://github.com/vatesfr/xen-orchestra/issues/4281) (PR [#4283](https://github.com/vatesfr/xen-orchestra/pull/4283))
- [iSCSI] Fix fibre channel paths display [#4291](https://github.com/vatesfr/xen-orchestra/issues/4291) (PR [#4303](https://github.com/vatesfr/xen-orchestra/pull/4303))
- [New VM] Fix tooltips not displayed on disabled elements in some browsers (e.g. Google Chrome) [#4304](https://github.com/vatesfr/xen-orchestra/issues/4304) (PR [#4309](https://github.com/vatesfr/xen-orchestra/pull/4309))
### Released packages
- xo-server-auth-ldap v0.6.5
- xen-api v0.26.0
- xo-server-sdn-controller v0.1
- xo-server-auth-saml v0.6.0
- xo-server-backup-reports v0.16.2
- xo-server v5.44.0
- xo-web v5.44.0
## **5.35.0** (2019-05-29)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Enhancements
- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
- [Host] Display hyperthreading status on advanced tab [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4263](https://github.com/vatesfr/xen-orchestra/pull/4263))
### Bug fixes
- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
- Prevent non-admin users from accessing admin pages via URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
- [Remotes] Fix disconnected remotes which may appear to work
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
### Released packages
- xo-server-backup-reports v0.16.1
- @xen-orchestra/fs v0.9.0
- vhd-lib v0.7.0
- xo-server v5.42.1
- xo-web v5.42.1
## **5.34.0** (2019-04-30)
### Highlights
- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))
### Enhancements
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))
### Bug fixes
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068))
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
### Released packages
- xo-server-backup-reports v0.16.0
- complex-matcher v0.6.0
- xo-vmdk-to-vhd v0.1.7
- vhd-lib v0.6.1
- xo-server v5.40.0
- xo-web v5.40.1
## **5.33.1** (2019-04-04)
### Bug fix
- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)
### Released packages
- xen-api v0.25.1
- xo-server v5.38.2
## **5.33.0** (2019-03-29)
### Enhancements

View File

@@ -1,37 +1,16 @@
> This file contains all changes that have not been released yet.
>
> Keep in mind the changelog is addressed to **users** and should be
> understandable by them.
### Enhancements
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Stats] Ability to display last day stats [#4160](https://github.com/vatesfr/xen-orchestra/issues/4160) (PR [#4168](https://github.com/vatesfr/xen-orchestra/pull/4168))
- [Settings/servers] Display servers connection issues [#4300](https://github.com/vatesfr/xen-orchestra/issues/4300) (PR [#4310](https://github.com/vatesfr/xen-orchestra/pull/4310))
- [VM] Permission to revert to any snapshot for VM operators [#3928](https://github.com/vatesfr/xen-orchestra/issues/3928) (PR [#4247](https://github.com/vatesfr/xen-orchestra/pull/4247))
- [VM] Show current operations and progress [#3811](https://github.com/vatesfr/xen-orchestra/issues/3811) (PR [#3982](https://github.com/vatesfr/xen-orchestra/pull/3982))
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [Settings/Servers] Fix read-only setting toggling
- [SDN Controller] Do not choose physical PIF without IP configuration for tunnels. (PR [#4319](https://github.com/vatesfr/xen-orchestra/pull/4319))
- [Xen servers] Fix `no connection found for object` error if pool master is reinstalled [#4299](https://github.com/vatesfr/xen-orchestra/issues/4299) (PR [#4302](https://github.com/vatesfr/xen-orchestra/pull/4302))
- [Backup-ng/restore] Display correct size for full VM backup [#4316](https://github.com/vatesfr/xen-orchestra/issues/4316) (PR [#4332](https://github.com/vatesfr/xen-orchestra/pull/4332))
- [VM/tab-advanced] Fix CPU limits edition (PR [#4337](https://github.com/vatesfr/xen-orchestra/pull/4337))
- [Remotes] Fix `EIO` errors due to massive parallel fs operations [#4323](https://github.com/vatesfr/xen-orchestra/issues/4323) (PR [#4330](https://github.com/vatesfr/xen-orchestra/pull/4330))
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
### Released packages
> Packages will be released in the order they are here, therefore, they should
> be listed by inverse order of dependency.
>
> Rule of thumb: add packages on top.
- @xen-orchestra/fs v0.10.0
- xo-server-sdn-controller v0.1.1
- xen-api v0.26.0
- xo-server v5.45.0
- xo-web v5.45.0
- vhd-lib v0.6.1
- xo-server v5.39.0
- xo-web v5.39.0

View File

@@ -14,5 +14,5 @@
1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer (and only one)
1. when you want a review, add a reviewer
1. if necessary, update your PR, and re-add a reviewer

View File

@@ -1,4 +1,4 @@
# Xen Orchestra [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
# Xen Orchestra [![Chat with us](https://storage.crisp.im/plugins/images/936925df-f37b-4ba8-bab0-70cd2edcb0be/badge.svg)](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
![](http://i.imgur.com/tRffA5y.png)

Binary files not shown: 7 images removed (before: 9.4 KiB, 71 KiB, 22 KiB, 48 KiB, 55 KiB, 57 KiB and 11 KiB).

View File

@@ -1,13 +1,13 @@
# Installation
SSH to your XenServer/XCP-ng host and execute the following:
SSH to your XenServer and execute the following:
```
bash -c "$(curl -s http://xoa.io/deploy)"
```
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself; it's 100% safe.
This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself; it's 100% safe.
## [More on XOA](xoa.md)

View File

@@ -1,6 +1,6 @@
# Metadata backup
> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.
> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
## Introduction
@@ -11,38 +11,21 @@ In Xen Orchestra, Metadata backup is divided into two different options:
* Pool metadata backup
* XO configuration backup
### Performing a backup
### How to use metadata backup
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:
![](./assets/metadata-1.png)
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
![](https://user-images.githubusercontent.com/21563339/53413921-bd636f00-39cd-11e9-8a3c-d4f893135fa4.png)
When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
![](./assets/metadata-2.png)
![](https://user-images.githubusercontent.com/21563339/52416838-d2de2b00-2aea-11e9-8da0-340fcb2767db.png)
Define the name and retention for the job.
![](./assets/metadata-3.png)
![](https://user-images.githubusercontent.com/21563339/52471527-65390a00-2b91-11e9-8019-600a4d9eeafb.png)
Once created, the job is displayed with the other classic jobs.
![](./assets/metadata-4.png)
![](https://user-images.githubusercontent.com/21563339/52416802-c0fc8800-2aea-11e9-8ef0-b0c1bd0e48b8.png)
### Performing a restore
> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (eg replacing a host with new hardware).
If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:
![](./assets/metadata-5.png)
If you click this button, it will show you Metadata backups available for restore:
![](./assets/metadata-6.png)
You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:
![](./assets/metadata-7.png)
That's it!
> Restore for metadata backup jobs should be available in XO 5.33

View File

@@ -1,33 +1,24 @@
# Support
Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).
You can access our pro support if you subscribe to any of these plans:
XOA is available in multiple plans:
* Free
* Starter
* Enterprise
* Premium
Higher-tier support plans include faster ticket response times (and cover more features). Paid support and response times depend on the plan you have; plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).
## XOA Free support
With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you; however, this support is limited and not guaranteed with regard to response times or resolutions offered.
The better the plan, the faster the support will be with higher priority.
## Community support
If you are using Xen Orchestra via the sources and not XOA, you can ask questions and try to receive help in a number of different ways:
If you are using Xen Orchestra via the sources, you can ask questions and try to receive help in two different ways:
* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
* In our [forum](https://xen-orchestra.com/forum/)
* In our IRC - `#xen-orchestra` on `Freenode`
We encourage you to give back to the community by assisting other users via these two avenues as well.
However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
Lastly while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe for one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).
> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.
If you are using Xen Orchestra in production, please subscribe to a plan.
## Open a ticket
If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)

View File

@@ -6,24 +6,23 @@
"babel-eslint": "^10.0.1",
"babel-jest": "^24.1.0",
"benchmark": "^2.1.4",
"eslint": "^6.0.1",
"eslint-config-prettier": "^6.0.0",
"eslint": "^5.1.0",
"eslint-config-prettier": "^4.1.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-node": "^8.0.0",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.102.0",
"globby": "^10.0.0",
"husky": "^3.0.0",
"flow-bin": "^0.95.1",
"globby": "^9.0.0",
"husky": "^1.2.1",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"sorted-object": "^2.0.1"
},
"engines": {

View File

@@ -1,6 +1,6 @@
{
"name": "complex-matcher",
"version": "0.6.0",
"version": "0.5.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -44,7 +44,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -599,13 +599,6 @@ export const parse = parser.parse.bind(parser)
// -------------------------------------------------------------------
const _extractStringFromRegexp = child => {
const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
return unescapedRegexp
}
}
const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof Or) {
const strings = []
@@ -613,12 +606,6 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
strings.push(child.value)
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
strings.push(unescapedRegexp)
}
}
})
return strings
}
@@ -626,12 +613,6 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
return [child.value]
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
return [unescapedRegexp]
}
}
return []
}

View File

@@ -12,13 +12,10 @@ import {
} from './'
it('getPropertyClausesStrings', () => {
const tmp = getPropertyClausesStrings(
parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
)
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
expect(tmp).toEqual({
bar: ['baz'],
baz: ['foo', 'bar', 'boo', 'far'],
foo: ['bar'],
baz: ['foo', 'bar'],
})
})

View File

@@ -43,7 +43,6 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -27,12 +27,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.10.0",
"@xen-orchestra/fs": "^0.8.0",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.7.0"
"vhd-lib": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -40,18 +40,17 @@
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"execa": "^1.0.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"rimraf": "^2.6.1",
"tmp": "^0.1.0"
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-lib",
"version": "0.7.0",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -22,11 +22,11 @@
},
"dependencies": {
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"core-js": "3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
@@ -35,16 +35,16 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.10.0",
"@xen-orchestra/fs": "^0.8.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^2.0.2",
"execa": "^1.0.0",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",
"tmp": "^0.1.0"
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -52,7 +52,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -1,7 +1,9 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import constantStream from './_constant-stream'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
@@ -230,11 +232,24 @@ export default class Vhd {
// Write functions.
// =================================================================
// Write a buffer at a given position in a vhd file.
// Write a buffer/stream at a given position in a vhd file.
async _write(data, offset) {
assert(Buffer.isBuffer(data))
debug(`_write offset=${offset} size=${data.length}`)
return this._handler.write(this._path, data, offset)
debug(
`_write offset=${offset} size=${
Buffer.isBuffer(data) ? data.length : '???'
}`
)
// TODO: could probably be merged in remote handlers.
const stream = await this._handler.createOutputStream(this._path, {
flags: 'r+',
start: offset,
})
return Buffer.isBuffer(data)
? new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
: fromEvent(data.pipe(stream), 'finish')
}
async _freeFirstBlockSpace(spaceNeededBytes) {
@@ -291,7 +306,7 @@ export default class Vhd {
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
await this._write(
Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
header.tableOffset + prevBat.length
)
await this.writeHeader()
@@ -316,7 +331,10 @@ export default class Vhd {
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
this._write(
constantStream([0], this.fullBlockSize),
sectorsToBytes(blockAddr)
),
this._setBatEntry(blockId, blockAddr),
])
@@ -364,7 +382,9 @@ export default class Vhd {
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(
`writeBlockSectors at ${offset} block=${block.id}, sectors=${beginSectorId}...${endSectorId}`
`writeBlockSectors at ${offset} block=${
block.id
}, sectors=${beginSectorId}...${endSectorId}`
)
for (let i = beginSectorId; i < endSectorId; ++i) {

View File

@@ -41,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.26.0"
"xen-api": "^0.25.0"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
@@ -56,7 +56,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -82,7 +82,7 @@ console.log(xapi.pool.$master.$resident_VMs[0].name_label)
A CLI is provided to help exploration and discovery of the XAPI.
```
> xen-api xen1.company.net root
> xen-api https://xen1.company.net root
Password: ******
root@xen1.company.net> xapi.status
'connected'
@@ -92,14 +92,6 @@ root@xen1.company.net> xapi.pool.$master.name_label
'xen1'
```
You can optionally prefix the address with a protocol: `https://` (default) or `http://`.
In case of error due to invalid or self-signed certificates you can use the `--allow-unauthorized` flag (or `--au`):
```
> xen-api --au xen1.company.net root
```
To ease searches, `find()` and `findAll()` functions are available:
```
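// illustration only: this hunk truncates the README's example; per the cli.js
// hunk later in this diff, find()/findAll() wrap lodash's find/filter over
// xapi.objects.all, so lodash-style predicates work (names below are made up)
root@xen1.company.net> find({ $type: 'VM', name_label: 'thin' })
root@xen1.company.net> findAll({ $type: 'host' }).length
```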

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.26.0",
"version": "0.25.0",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -33,7 +33,6 @@
"node": ">=6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"event-to-promise": "^0.8.0",
@@ -46,7 +45,7 @@
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -69,7 +68,6 @@
"plot": "gnuplot -p memory-test.gnu",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -0,0 +1,142 @@
function request() {
if (this._requested) {
return
}
this._requested = true
const resolve = this._resolve
if (resolve !== undefined) {
this._resolve = undefined
resolve()
}
const listeners = this._listeners
if (listeners !== undefined) {
this._listeners = undefined
for (let i = 0, n = listeners.length; i < n; ++i) {
listeners[i].call(this)
}
}
}
const INTERNAL = {}
function Source(signals) {
const request_ = (this.request = request.bind(
(this.signal = new Signal(INTERNAL))
))
if (signals === undefined) {
return
}
const n = signals.length
for (let i = 0; i < n; ++i) {
if (signals[i].requested) {
request_()
return
}
}
for (let i = 0; i < n; ++i) {
signals[i].addListener(request_)
}
}
class Subscription {
constructor(signal, listener) {
this._listener = listener
this._signal = signal
}
get closed() {
return this._signal === undefined
}
unsubscribe() {
const signal = this._signal
if (signal !== undefined) {
const listener = this._listener
this._listener = this._signal = undefined
const listeners = signal._listeners
if (listeners !== undefined) {
const i = listeners.indexOf(listener)
if (i !== -1) {
listeners.splice(i, 1)
}
}
}
}
}
const closedSubscription = new Subscription()
export default class Signal {
static source(signals) {
return new Source(signals)
}
constructor(executor) {
this._listeners = undefined
this._promise = undefined
this._requested = false
this._resolve = undefined
if (executor !== INTERNAL) {
executor(request.bind(this))
}
}
get description() {
return this._description
}
get requested() {
return this._requested
}
throwIfRequested() {
if (this._requested) {
throw new Error('this signal has been requested')
}
}
// ===========================================================================
// Promise like API
// ===========================================================================
then(listener) {
if (typeof listener !== 'function') {
return this
}
let promise = this._promise
if (promise === undefined) {
const requested = this._requested
promise = this._promise = requested
? Promise.resolve()
: new Promise(resolve => {
this._resolve = resolve
})
}
return promise.then(listener)
}
// ===========================================================================
// Observable like API (but not compatible)
// ===========================================================================
subscribe(listener) {
if (this._requested) {
listener.call(this)
return closedSubscription
}
const listeners = this._listeners
if (listeners === undefined) {
this._listeners = [listener]
} else {
listeners.push(listener)
}
return new Subscription(this, listener)
}
}
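
For orientation only (not part of the commit), here is a minimal usage sketch of the Signal/Source pair defined above, based on this file and on how index.js consumes it later in this diff:

```js
// Illustrative only: exercise the Source returned by Signal.source().
const { signal, request } = Signal.source()

// Observable-like API: run a callback when the signal is requested.
const subscription = signal.subscribe(() => console.log('requested'))

// Promise-like API: the signal is also thenable.
signal.then(() => console.log('resolved'))

console.log(signal.requested) // false
request() // fires the subscriber and resolves the thenable
console.log(signal.requested) // true

subscription.unsubscribe() // safe no-op once the signal has been requested
```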

View File

@@ -9,7 +9,6 @@ import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
import { createClient } from './'
@@ -26,20 +25,6 @@ function askPassword(prompt = 'Password: ') {
})
}
const { getPrototypeOf, ownKeys } = Reflect
function getAllBoundDescriptors(object) {
const descriptors = { __proto__: null }
let current = object
do {
ownKeys(current).forEach(key => {
if (!(key in descriptors)) {
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
}
})
} while ((current = getPrototypeOf(current)) !== null)
return descriptors
}
// ===================================================================
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
@@ -93,17 +78,11 @@ const main = async args => {
const repl = createRepl({
prompt: `${xapi._humanId}> `,
})
repl.context.xapi = xapi
{
const ctx = repl.context
ctx.xapi = xapi
ctx.diff = (a, b) => console.log('%s', diff(a, b))
ctx.find = predicate => find(xapi.objects.all, predicate)
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
}
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
repl.context.find = predicate => find(xapi.objects.all, predicate)
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {

View File

@@ -25,6 +25,7 @@ import isReadOnlyCall from './_isReadOnlyCall'
import makeCallSetting from './_makeCallSetting'
import parseUrl from './_parseUrl'
import replaceSensitiveValues from './_replaceSensitiveValues'
import Signal from './_Signal'
import XapiError from './_XapiError'
// ===================================================================
@@ -34,7 +35,7 @@ const EVENT_TIMEOUT = 60
// ===================================================================
const { defineProperties, defineProperty, freeze, keys: getKeys } = Object
const { defineProperties, freeze, keys: getKeys } = Object
// -------------------------------------------------------------------
@@ -92,22 +93,15 @@ export class Xapi extends EventEmitter {
this._allowUnauthorized = opts.allowUnauthorized
this._setUrl(url)
this._connected = new Promise(resolve => {
this._resolveConnected = resolve
})
this._disconnected = Promise.resolve()
this._connected = Signal.source()
this._disconnected = Signal.source()
this._sessionId = undefined
this._status = DISCONNECTED
this._watchEventsError = undefined
this._lastEventFetchedTimestamp = undefined
this._debounce = opts.debounce ?? 200
this._objects = new Collection()
this._objectsByRef = { __proto__: null }
this._objectsFetched = new Promise(resolve => {
this._resolveObjectsFetched = resolve
})
this._objectsFetched = Signal.source()
this._eventWatchers = { __proto__: null }
this._taskWatchers = { __proto__: null }
this._watchedTypes = undefined
@@ -133,11 +127,11 @@ export class Xapi extends EventEmitter {
// ===========================================================================
get connected() {
return this._connected
return this._connected.signal
}
get disconnected() {
return this._disconnected
return this._disconnected.signal
}
get pool() {
@@ -164,17 +158,30 @@ export class Xapi extends EventEmitter {
assert(status === DISCONNECTED)
this._status = CONNECTING
this._disconnected = new Promise(resolve => {
this._resolveDisconnected = resolve
})
this._disconnected = Signal.source()
try {
await this._sessionOpen()
// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})
this._pool = (await this.getAllRecords('pool'))[0]
debug('%s: connected', this._humanId)
this._status = CONNECTED
this._resolveConnected()
this._resolveConnected = undefined
this._connected.request()
this.emit(CONNECTED)
} catch (error) {
ignoreErrors.call(this.disconnect())
@@ -191,9 +198,7 @@ export class Xapi extends EventEmitter {
}
if (status === CONNECTED) {
this._connected = new Promise(resolve => {
this._resolveConnected = resolve
})
this._connected = Signal.source()
} else {
assert(status === CONNECTING)
}
@@ -207,8 +212,7 @@ export class Xapi extends EventEmitter {
debug('%s: disconnected', this._humanId)
this._status = DISCONNECTED
this._resolveDisconnected()
this._resolveDisconnected = undefined
this._disconnected.request()
this.emit(DISCONNECTED)
}
@@ -482,14 +486,6 @@ export class Xapi extends EventEmitter {
return this._objectsFetched
}
get lastEventFetchedTimestamp() {
return this._lastEventFetchedTimestamp
}
get watchEventsError() {
return this._watchEventsError
}
// ensure we have received all events up to this call
//
// optionally returns the up to date object for the given ref
@@ -667,47 +663,41 @@ export class Xapi extends EventEmitter {
}
_interruptOnDisconnect(promise) {
let listener
let subscription
const pWrapper = new Promise((resolve, reject) => {
subscription = this._disconnected.signal.subscribe(() => {
reject(new Error('disconnected'))
})
promise.then(resolve, reject)
this.on(
DISCONNECTED,
(listener = () => {
reject(new Error('disconnected'))
})
)
})
const clean = () => {
this.removeListener(DISCONNECTED, listener)
subscription.unsubscribe()
}
pWrapper.then(clean, clean)
return pWrapper
}
_sessionCallRetryOptions = {
tries: 2,
when: error =>
this._status !== DISCONNECTED && error?.code === 'SESSION_INVALID',
onRetry: () => this._sessionOpen(),
}
_sessionCall(method, args, timeout) {
async _sessionCall(method, args, timeout) {
if (method.startsWith('session.')) {
return Promise.reject(
new Error('session.*() methods are disabled from this interface')
)
throw new Error('session.*() methods are disabled from this interface')
}
return pRetry(() => {
const sessionId = this._sessionId
assert.notStrictEqual(sessionId, undefined)
const sessionId = this._sessionId
assert.notStrictEqual(sessionId, undefined)
const newArgs = [sessionId]
if (args !== undefined) {
newArgs.push.apply(newArgs, args)
const newArgs = [sessionId]
if (args !== undefined) {
newArgs.push.apply(newArgs, args)
}
return pRetry(
() => this._interruptOnDisconnect(this._call(method, newArgs, timeout)),
{
tries: 2,
when: { code: 'SESSION_INVALID' },
onRetry: () => this._sessionOpen(),
}
return this._call(method, newArgs, timeout)
}, this._sessionCallRetryOptions)
)
}
// FIXME: (probably rare) race condition leading to unnecessary login when:
@@ -734,28 +724,6 @@ export class Xapi extends EventEmitter {
},
}
)
const oldPoolRef = this._pool?.$ref
this._pool = (await this.getAllRecords('pool'))[0]
// if the pool ref has changed, it means that the XAPI has been restarted or
// it's not the same XAPI, we need to refetch the available types and reset
// the event loop in that case
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})
}
}
_setUrl(url) {
@@ -910,10 +878,8 @@ export class Xapi extends EventEmitter {
async _watchEvents() {
// eslint-disable-next-line no-labels
mainLoop: while (true) {
if (this._resolveObjectsFetched === undefined) {
this._objectsFetched = new Promise(resolve => {
this._resolveObjectsFetched = resolve
})
if (this._objectsFetched.signal.requested) {
this._objectsFetched = Signal.source()
}
await this._connected
@@ -941,8 +907,7 @@ export class Xapi extends EventEmitter {
// initial fetch
await this._refreshCachedRecords(types)
this._resolveObjectsFetched()
this._resolveObjectsFetched = undefined
this._objectsFetched.request()
// event loop
const debounce = this._debounce
@@ -953,28 +918,21 @@ export class Xapi extends EventEmitter {
let result
try {
// don't use _sessionCall because a session failure should break the
// loop and trigger a complete refetch
result = await this._call(
result = await this._sessionCall(
'event.from',
[
this._sessionId,
types,
fromToken,
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
],
EVENT_TIMEOUT * 1e3 * 1.1
)
this._lastEventFetchedTimestamp = Date.now()
this._watchEventsError = undefined
} catch (error) {
const code = error?.code
if (code === 'EVENTS_LOST' || code === 'SESSION_INVALID') {
if (error?.code === 'EVENTS_LOST') {
// eslint-disable-next-line no-labels
continue mainLoop
}
this._watchEventsError = error
console.warn('_watchEvents', error)
await pDelay(this._eventPollDelay)
continue
@@ -996,10 +954,8 @@ export class Xapi extends EventEmitter {
//
// It also has to manually get all objects first.
async _watchEventsLegacy() {
if (this._resolveObjectsFetched === undefined) {
this._objectsFetched = new Promise(resolve => {
this._resolveObjectsFetched = resolve
})
if (this._objectsFetched.signal.requested) {
this._objectsFetched = Signal.source()
}
await this._connected
@@ -1008,8 +964,7 @@ export class Xapi extends EventEmitter {
// initial fetch
await this._refreshCachedRecords(types)
this._resolveObjectsFetched()
this._resolveObjectsFetched = undefined
this._objectsFetched.request()
await this._sessionCall('event.register', [types])
@@ -1047,23 +1002,17 @@ export class Xapi extends EventEmitter {
const getObjectByRef = ref => this._objectsByRef[ref]
Record = defineProperty(
function(ref, data) {
defineProperties(this, {
$id: { value: data.uuid ?? ref },
$ref: { value: ref },
$xapi: { value: xapi },
})
for (let i = 0; i < nFields; ++i) {
const field = fields[i]
this[field] = data[field]
}
},
'name',
{
value: type,
Record = function(ref, data) {
defineProperties(this, {
$id: { value: data.uuid ?? ref },
$ref: { value: ref },
$xapi: { value: xapi },
})
for (let i = 0; i < nFields; ++i) {
const field = fields[i]
this[field] = data[field]
}
)
}
const getters = { $pool: getPool }
const props = { $type: type }
@@ -1083,14 +1032,9 @@ export class Xapi extends EventEmitter {
}
}
props[`add_${field}`] = function(value) {
props[`add_to_${field}`] = function(...values) {
return xapi
.call(`${type}.add_${field}`, this.$ref, value)
.then(noop)
}
props[`remove_${field}`] = function(value) {
return xapi
.call(`${type}.remove_${field}`, this.$ref, value)
.call(`${type}.add_${field}`, this.$ref, values)
.then(noop)
}
} else if (value !== null && typeof value === 'object') {

View File

@@ -25,8 +25,5 @@
},
"dependencies": {
"xo-common": "^0.2.0"
},
"scripts": {
"postversion": "npm publish"
}
}

View File

@@ -43,7 +43,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",
@@ -64,7 +64,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -24,6 +24,7 @@ const nicePipe = require('nice-pipe')
const pairs = require('lodash/toPairs')
const pick = require('lodash/pick')
const pump = require('pump')
const startsWith = require('lodash/startsWith')
const prettyMs = require('pretty-ms')
const progressStream = require('progress-stream')
const pw = require('pw')
@@ -80,7 +81,7 @@ function parseParameters(args) {
const name = matches[1]
let value = matches[2]
if (value.startsWith('json:')) {
if (startsWith(value, 'json:')) {
value = JSON.parse(value.slice(5))
}

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -45,7 +45,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -43,7 +43,7 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@types/node": "^12.0.2",
"@types/node": "^11.11.4",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^8.0.1",
@@ -55,7 +55,6 @@
"lint": "tslint 'src/*.ts'",
"posttest": "yarn run lint",
"prepublishOnly": "yarn run build",
"start": "node dist/index.js",
"postversion": "npm publish"
"start": "node dist/index.js"
}
}

View File

@@ -32,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"jsonrpc-websocket-client": "^0.5.0",
"jsonrpc-websocket-client": "^0.4.1",
"lodash": "^4.17.2",
"make-error": "^1.0.4"
},
@@ -49,7 +49,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,5 +1,6 @@
import JsonRpcWebSocketClient, { OPEN, CLOSED } from 'jsonrpc-websocket-client'
import { BaseError } from 'make-error'
import { startsWith } from 'lodash'
// ===================================================================
@@ -34,7 +35,7 @@ export default class Xo extends JsonRpcWebSocketClient {
}
call(method, args, i) {
if (method.startsWith('session.')) {
if (startsWith(method, 'session.')) {
return Promise.reject(
new XoError('session.*() methods are disabled from this interface')
)

View File

@@ -41,7 +41,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -41,6 +41,5 @@
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -49,6 +49,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.6.5",
"version": "0.6.4",
"license": "AGPL-3.0",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -39,7 +39,7 @@
"inquirer": "^6.0.0",
"ldapjs": "^1.0.1",
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -55,6 +55,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -230,9 +230,10 @@ class AuthLdap {
logger(`attempting to bind as ${entry.objectName}`)
await bind(entry.objectName, password)
logger(
`successfully bound as ${entry.objectName} => ${username} authenticated`
`successfully bound as ${
entry.objectName
} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))
return { username }
} catch (error) {
logger(`failed to bind as ${entry.objectName}: ${error.message}`)

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.6.0",
"version": "0.5.3",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [
@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-saml": "^1.1.0"
"passport-saml": "^1.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -24,10 +24,7 @@ export const configurationSchema = {
},
usernameField: {
title: 'Username field',
description: `Field to use as the XO username
You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress\` if you are using Microsoft Azure Active Directory.
`,
description: 'Field to use as the XO username',
type: 'string',
},
},
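The longer `usernameField` description in the hunk above suggests the `http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress` claim for Microsoft Azure Active Directory. As a minimal sketch (the JSON shape is assumed for illustration and the other plugin settings are omitted), the setting would look like:

```json
{
  "usernameField": "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"
}
```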

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.2",
"version": "0.15.0",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -36,7 +36,6 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -44,8 +43,6 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
@@ -58,6 +55,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -1,11 +1,8 @@
import createLogger from '@xen-orchestra/log'
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, groupBy, startCase } from 'lodash'
import { forEach, get, startCase } from 'lodash'
import pkg from '../package'
const logger = createLogger('xo:xo-server-backup-reports')
export const configurationSchema = {
type: 'object',
@@ -49,9 +46,6 @@ export const testSchema = {
// ===================================================================
const INDENT = ' '
const UNKNOWN_ITEM = 'Unknown'
const ICON_FAILURE = '🚨'
const ICON_INTERRUPTED = '⚠️'
const ICON_SKIPPED = '⏩'
@@ -66,7 +60,7 @@ const STATUS_ICON = {
}
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormatter = timezone =>
const createDateFormater = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
@@ -92,6 +86,10 @@ const formatSpeed = (bytes, milliseconds) =>
})
: 'N/A'
const logError = e => {
console.error('backup report error:', e)
}
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
@@ -102,116 +100,40 @@ const isSkippedError = error =>
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
error.message === NO_SUCH_OBJECT_ERROR
// ===================================================================
const INDENT = ' '
const createGetTemporalDataMarkdown = formatDate => (
start,
end,
nbIndent = 0
) => {
const indent = INDENT.repeat(nbIndent)
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
const TITLE_BY_STATUS = {
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
interrupted: n => `## ${n} Interrupted`,
skipped: n => `## ${n} Skipped`,
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
}
const getTemporalDataMarkdown = (end, start, formatDate) => {
const markdown = [`- **Start time**: ${formatDate(start)}`]
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
if (end !== undefined) {
markdown.push(`- **End time**: ${formatDate(end)}`)
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
const duration = end - start
if (duration >= 1) {
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
}
}
return markdown
}
const getWarningsMarkdown = (warnings = []) =>
warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
const getErrorMarkdown = task => {
let message
if (
task.status === 'success' ||
(message = task.result?.message ?? task.result?.code) === undefined
) {
const addWarnings = (text, warnings, nbIndent = 0) => {
if (warnings === undefined) {
return
}
const label = task.status === 'skipped' ? 'Reason' : 'Error'
return `- **${label}**: ${message}`
const indent = INDENT.repeat(nbIndent)
warnings.forEach(({ message }) => {
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
})
}
const MARKDOWN_BY_TYPE = {
pool(task, { formatDate }) {
const { id, pool = {}, poolMaster = {} } = task.data
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
return {
body: [
pool.uuid !== undefined
? `- **UUID**: ${pool.uuid}`
: `- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[pool] ${name}`,
}
},
xo(task, { formatDate, jobName }) {
return {
body: [
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[XO] ${jobName}`,
}
},
async remote(task, { formatDate, xo }) {
const id = task.data.id
const name = await xo.getRemote(id).then(
({ name }) => name,
error => {
logger.warn(error)
return UNKNOWN_ITEM
}
)
return {
body: [
`- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[remote] ${name}`,
}
},
}
const getMarkdown = (task, props) =>
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
const toMarkdown = parts => {
const lines = []
let indentLevel = 0
const helper = part => {
if (typeof part === 'string') {
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
} else if (Array.isArray(part)) {
++indentLevel
part.forEach(helper)
--indentLevel
}
}
helper(parts)
return lines.join('\n')
}
// ===================================================================
class BackupReportsXoPlugin {
constructor(xo) {
this._xo = xo
this._report = this._report.bind(this)
this._report = this._wrapper.bind(this)
}
configure({ toMails, toXmpp }) {
@@ -224,174 +146,76 @@ class BackupReportsXoPlugin {
}
test({ runId }) {
return this._report(runId, undefined, true)
return this._backupNgListener(undefined, undefined, undefined, runId)
}
unload() {
this._xo.removeListener('job:terminated', this._report)
}
async _report(runJobId, { type, status } = {}, force) {
const xo = this._xo
try {
if (type === 'call') {
return this._legacyVmHandler(status)
}
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
}
const reportWhen = log.data.reportWhen
if (
!force &&
(reportWhen === 'never' ||
// Handle improper value introduced by:
// https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
reportWhen === 'Never' ||
(reportWhen === 'failure' && log.status === 'success'))
) {
return
}
const [job, schedule] = await Promise.all([
await xo.getJob(log.jobId),
await xo.getSchedule(log.scheduleId).catch(error => {
logger.warn(error)
}),
])
if (job.type === 'backup') {
return this._ngVmHandler(log, job, schedule, force)
} else if (job.type === 'metadataBackup') {
return this._metadataHandler(log, job, schedule, force)
}
throw new Error(`Unknown backup job type: ${job.type}`)
} catch (error) {
logger.warn(error)
_wrapper(status, job, schedule, runJobId) {
if (job.type === 'metadataBackup') {
return
}
return new Promise(resolve =>
resolve(
job.type === 'backup'
? this._backupNgListener(status, job, schedule, runJobId)
: this._listener(status, job, schedule, runJobId)
)
).catch(logError)
}
async _metadataHandler(log, { name: jobName }, schedule, force) {
async _backupNgListener(_1, _2, schedule, runJobId) {
const xo = this._xo
const formatDate = createDateFormatter(schedule?.timezone)
const tasksByStatus = groupBy(log.tasks, 'status')
const n = log.tasks?.length ?? 0
const nSuccesses = tasksByStatus.success?.length ?? 0
if (!force && log.data.reportWhen === 'failure') {
delete tasksByStatus.success
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
}
// header
const markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Job name**: ${jobName}`,
`- **Run ID**: ${log.id}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
...getWarningsMarkdown(log.warnings),
getErrorMarkdown(log),
]
const nagiosText = []
// body
for (const status of STATUS) {
const tasks = tasksByStatus[status]
if (tasks === undefined) {
continue
}
// tasks header
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
// tasks body
for (const task of tasks) {
const taskMarkdown = await getMarkdown(task, {
formatDate,
jobName: log.jobName,
})
if (taskMarkdown === undefined) {
continue
}
const { title, body } = taskMarkdown
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
if (task.status !== 'success') {
nagiosText.push(`[${task.status}] ${title}`)
}
for (const subTask of task.tasks ?? []) {
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
if (taskMarkdown === undefined) {
continue
}
const icon = STATUS_ICON[subTask.status]
const { title, body } = taskMarkdown
subMarkdown.push([
`- **${title}** ${icon}`,
[...body, ...getWarningsMarkdown(subTask.warnings)],
])
}
markdown.push('', '', `### ${title}`, ...subMarkdown)
}
}
// footer
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
subject: `[Xen Orchestra] ${log.status} Metadata backup report for ${
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${log.jobName}`
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
log.jobName
} - ${nagiosText.join(' ')}`,
})
}
async _ngVmHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const { reportWhen, mode } = log.data || {}
if (
reportWhen === 'never' ||
(log.status === 'success' && reportWhen === 'failure')
) {
return
}
const formatDate = createDateFormatter(schedule?.timezone)
if (schedule === undefined) {
schedule = await xo.getSchedule(log.scheduleId)
}
if (log.tasks === undefined) {
const markdown = [
const jobName = (await xo.getJob(log.jobId, 'backup')).name
const formatDate = createDateFormater(schedule.timezone)
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
if (
(log.status === 'failure' || log.status === 'skipped') &&
log.result !== undefined
) {
let markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${log.id}`,
`- **Run ID**: ${runJobId}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
getErrorMarkdown(log),
...getWarningsMarkdown(log.warnings),
'---',
'',
`*${pkg.name} v${pkg.version}*`,
...getTemporalDataMarkdown(log.start, log.end),
`- **Error**: ${log.result.message}`,
]
addWarnings(markdown, log.warnings)
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${log.status}] Backup report for ${jobName} - Error : ${log.result.message}`,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
@@ -407,7 +231,7 @@ class BackupReportsXoPlugin {
let nSkipped = 0
let nInterrupted = 0
for (const taskLog of log.tasks) {
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
if (taskLog.status === 'success' && reportWhen === 'failure') {
continue
}
@@ -420,16 +244,16 @@ class BackupReportsXoPlugin {
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
...getWarningsMarkdown(taskLog.warnings),
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
]
addWarnings(text, taskLog.warnings)
const failedSubTasks = []
const snapshotText = []
const srsText = []
const remotesText = []
for (const subTaskLog of taskLog.tasks ?? []) {
for (const subTaskLog of taskLog.tasks || []) {
if (
subTaskLog.message !== 'export' &&
subTaskLog.message !== 'snapshot'
@@ -438,36 +262,29 @@ class BackupReportsXoPlugin {
}
const icon = STATUS_ICON[subTaskLog.status]
const type = subTaskLog.data?.type
const errorMarkdown = getErrorMarkdown(subTaskLog)
const errorMessage = ` - **Error**: ${get(
subTaskLog.result,
'message'
)}`
if (subTaskLog.message === 'snapshot') {
snapshotText.push(`- **Snapshot** ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
])
} else if (type === 'remote') {
snapshotText.push(
`- **Snapshot** ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
)
} else if (subTaskLog.data.type === 'remote') {
const id = subTaskLog.data.id
const remote = await xo.getRemote(id).catch(error => {
logger.warn(error)
})
const title = remote !== undefined ? remote.name : `Remote Not found`
remotesText.push(`- **${title}** (${id}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
const remote = await xo.getRemote(id).catch(() => {})
remotesText.push(
` - **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${id}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(remotesText, subTaskLog.warnings, 2)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : id)
remotesText.push('', errorMessage)
}
} else {
const id = subTaskLog.data.id
@@ -477,17 +294,14 @@ class BackupReportsXoPlugin {
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
srsText.push(
` - **${srName}** (${srUuid}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(srsText, subTaskLog.warnings, 2)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
srsText.push('', errorMessage)
}
}
@@ -499,48 +313,53 @@ class BackupReportsXoPlugin {
return
}
const size = operationLog.result?.size
if (size > 0) {
const operationInfoText = []
addWarnings(operationInfoText, operationLog.warnings, 3)
if (operationLog.status === 'success') {
const size = operationLog.result.size
if (operationLog.message === 'merge') {
globalMergeSize += size
} else {
globalTransferSize += size
}
}
operationInfoText.push(
` - **Size**: ${formatSize(size)}`,
` - **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`
)
} else if (get(operationLog.result, 'message') !== undefined) {
operationInfoText.push(
` - **Error**: ${get(operationLog.result, 'message')}`
)
}
const operationText = [
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
[
...getTemporalDataMarkdown(
operationLog.end,
operationLog.start,
formatDate
),
size > 0 && `- **Size**: ${formatSize(size)}`,
size > 0 &&
`- **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`,
...getWarningsMarkdown(operationLog.warnings),
getErrorMarkdown(operationLog),
],
]
if (type === 'remote') {
` - **${operationLog.message}** ${
STATUS_ICON[operationLog.status]
}`,
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
...operationInfoText,
].join('\n')
if (get(subTaskLog, 'data.type') === 'remote') {
remotesText.push(operationText)
} else if (type === 'SR') {
remotesText.join('\n')
}
if (get(subTaskLog, 'data.type') === 'SR') {
srsText.push(operationText)
srsText.join('\n')
}
})
}
const subText = [
...snapshotText,
srsText.length !== 0 && `- **SRs**`,
srsText,
remotesText.length !== 0 && `- **Remotes**`,
remotesText,
]
if (srsText.length !== 0) {
srsText.unshift(`- **SRs**`)
}
if (remotesText.length !== 0) {
remotesText.unshift(`- **Remotes**`)
}
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
if (taskLog.result !== undefined) {
if (taskLog.status === 'skipped') {
++nSkipped
@@ -550,7 +369,8 @@ class BackupReportsXoPlugin {
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: taskLog.result.message
}`
}`,
''
)
nagiosText.push(
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -559,7 +379,11 @@ class BackupReportsXoPlugin {
)
} else {
++nFailures
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
failedVmsText.push(
...text,
`- **Error**: ${taskLog.result.message}`,
''
)
nagiosText.push(
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -570,7 +394,7 @@ class BackupReportsXoPlugin {
} else {
if (taskLog.status === 'failure') {
++nFailures
failedVmsText.push(...text, ...subText)
failedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[${
vm !== undefined ? vm.name_label : 'undefined'
@@ -578,34 +402,37 @@ class BackupReportsXoPlugin {
)
} else if (taskLog.status === 'interrupted') {
++nInterrupted
interruptedVmsText.push(...text, ...subText)
interruptedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
)
} else {
successfulVmsText.push(...text, ...subText)
successfulVmsText.push(...text, '', '', ...subText, '')
}
}
}
const nVms = log.tasks.length
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
const markdown = [
let markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${log.id}`,
`- **Run ID**: ${runJobId}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
...getTemporalDataMarkdown(log.start, log.end),
`- **Successes**: ${nSuccesses} / ${nVms}`,
globalTransferSize !== 0 &&
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
globalMergeSize !== 0 &&
`- **Merge size**: ${formatSize(globalMergeSize)}`,
...getWarningsMarkdown(log.warnings),
'',
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
addWarnings(markdown, log.warnings)
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
@@ -630,7 +457,7 @@ class BackupReportsXoPlugin {
)
}
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
if (nSuccesses !== 0 && reportWhen !== 'failure') {
markdown.push(
'---',
'',
@@ -641,8 +468,9 @@ class BackupReportsXoPlugin {
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown: toMarkdown(markdown),
markdown,
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
@@ -682,9 +510,9 @@ class BackupReportsXoPlugin {
])
}
_legacyVmHandler(status) {
_listener(status) {
const { calls, timezone, error } = status
const formatDate = createDateFormatter(timezone)
const formatDate = createDateFormater(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =
@@ -709,7 +537,9 @@ class BackupReportsXoPlugin {
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${error.message}`,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
error.message
}`,
})
}

View File

@@ -33,7 +33,7 @@
},
"dependencies": {
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.5.0"
"jsonrpc-websocket-client": "^0.4.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -49,6 +49,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -44,6 +44,5 @@
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -189,7 +189,9 @@ export default class DensityPlan extends Plan {
const { vm, destination } = move
const xapiDest = this.xo.getXapi(destination)
debug(
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${vm.$container}).`
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
vm.$container
}).`
)
return xapiDest.migrateVm(
vm._xapiId,

View File

@@ -126,7 +126,9 @@ export default class PerformancePlan extends Plan {
destinationAverages.memoryFree -= vmAverages.memory
debug(
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${exceededHost.id}).`
`Migrate VM (${vm.id}) to Host (${destination.id}) from Host (${
exceededHost.id
}).`
)
optimizationsCount++
@@ -141,7 +143,9 @@ export default class PerformancePlan extends Plan {
await Promise.all(promises)
debug(
`Performance mode: ${optimizationsCount} optimizations for Host (${exceededHost.id}).`
`Performance mode: ${optimizationsCount} optimizations for Host (${
exceededHost.id
}).`
)
}
}

View File

@@ -42,6 +42,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -183,7 +183,9 @@ export const configurationSchema = {
description: Object.keys(HOST_FUNCTIONS)
.map(
k =>
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${HOST_FUNCTIONS[k].description}`
` * ${k} (${HOST_FUNCTIONS[k].unit}): ${
HOST_FUNCTIONS[k].description
}`
)
.join('\n'),
type: 'string',
@@ -231,7 +233,9 @@ export const configurationSchema = {
description: Object.keys(VM_FUNCTIONS)
.map(
k =>
` * ${k} (${VM_FUNCTIONS[k].unit}): ${VM_FUNCTIONS[k].description}`
` * ${k} (${VM_FUNCTIONS[k].unit}): ${
VM_FUNCTIONS[k].description
}`
)
.join('\n'),
type: 'string',
@@ -280,7 +284,9 @@ export const configurationSchema = {
description: Object.keys(SR_FUNCTIONS)
.map(
k =>
` * ${k} (${SR_FUNCTIONS[k].unit}): ${SR_FUNCTIONS[k].description}`
` * ${k} (${SR_FUNCTIONS[k].unit}): ${
SR_FUNCTIONS[k].description
}`
)
.join('\n'),
type: 'string',
@@ -408,7 +414,9 @@ ${monitorBodies.join('\n')}`
}
_parseDefinition(definition) {
const alarmId = `${definition.objectType}|${definition.variableName}|${definition.alarmTriggerLevel}`
const alarmId = `${definition.objectType}|${definition.variableName}|${
definition.alarmTriggerLevel
}`
const typeFunction =
TYPE_FUNCTION_MAP[definition.objectType][definition.variableName]
const parseData = (result, uuid) => {
@@ -460,7 +468,9 @@ ${monitorBodies.join('\n')}`
...definition,
alarmId,
vmFunction: typeFunction,
title: `${typeFunction.name} ${definition.comparator} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
title: `${typeFunction.name} ${definition.comparator} ${
definition.alarmTriggerLevel
}${typeFunction.unit}`,
snapshot: async () => {
return Promise.all(
map(definition.uuids, async uuid => {
@@ -654,7 +664,9 @@ ${entry.listItem}
subject: `[Xen Orchestra] Performance Alert ${subjectSuffix}`,
markdown:
markdownBody +
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${this._configuration.baseUrl}#/settings/plugins)\n`,
`\n\n\nSent from Xen Orchestra [perf-alert plugin](${
this._configuration.baseUrl
}#/settings/plugins)\n`,
})
} else {
throw new Error('The email alert system has a configuration issue.')

View File

@@ -1,3 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,43 +0,0 @@
# xo-server-sdn-controller [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
XO Server plugin that allows the creation of pool-wide private networks.
## Install
To install XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Usage
### Network creation
In the network creation view, select a `pool` and `Private network`.
Create the network.
A choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, to `/etc/sysconfig/iptables` on all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
### Configuration
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
The plugin's configuration contains:
- `cert-dir`: A path to a directory containing the certificates used to create SSL connections with the hosts.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it with the plugin's one.
## Contributions
Contributions are *very* welcome, either on the documentation or on the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
AGPL3 © [Vates SAS](http://vates.fr)
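The README above documents the plugin's two settings, `cert-dir` and `override-certs`. As a minimal sketch of the corresponding plugin configuration (the JSON shape and the directory path are illustrative assumptions; only the key names, types and default come from the README and the plugin's `configurationSchema`):

```json
{
  "cert-dir": "/var/lib/xo-server/sdn-controller-certs",
  "override-certs": false
}
```

When `cert-dir` is omitted, the plugin generates its own self-signed certificates, as described above.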

View File

@@ -1,36 +0,0 @@
{
"name": "xo-server-sdn-controller",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-sdn-controller",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-sdn-controller",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "./dist",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.1.1",
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.4.4",
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.84",
"promise-toolbox": "^0.13.0"
},
"private": true
}

View File

@@ -1,780 +0,0 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import NodeOpenssl from 'node-openssl-cert'
import { access, constants, readFile, writeFile } from 'fs'
import { EventEmitter } from 'events'
import { filter, find, forOwn, map } from 'lodash'
import { fromCallback, fromEvent } from 'promise-toolbox'
import { join } from 'path'
import { OvsdbClient } from './ovsdb-client'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller')
const PROTOCOL = 'pssl'
const CA_CERT = 'ca-cert.pem'
const CLIENT_KEY = 'client-key.pem'
const CLIENT_CERT = 'client-cert.pem'
const SDN_CONTROLLER_CERT = 'sdn-controller-ca.pem'
const NB_DAYS = 9999
// =============================================================================
export const configurationSchema = {
type: 'object',
properties: {
'cert-dir': {
description: `Full path to a directory containing \`client-cert.pem\`,
\`client-key.pem\` and \`ca-cert.pem\`, used to create SSL connections with hosts.
If none is provided, the plugin will create its own self-signed certificates.`,
type: 'string',
},
'override-certs': {
description: `Replace already existing SDN controller CA certificate`,
type: 'boolean',
default: false,
},
},
}
// =============================================================================
async function fileWrite(path, data) {
await fromCallback(writeFile, path, data)
log.debug(`${path} successfully written`)
}
async function fileRead(path) {
const result = await fromCallback(readFile, path)
return result
}
async function fileExists(path) {
try {
await fromCallback(access, path, constants.F_OK)
} catch (error) {
if (error.code === 'ENOENT') {
return false
}
throw error
}
return true
}
// =============================================================================
class SDNController extends EventEmitter {
constructor({ xo, getDataDir }) {
super()
this._xo = xo
this._getDataDir = getDataDir
this._clientKey = null
this._clientCert = null
this._caCert = null
this._poolNetworks = []
this._ovsdbClients = []
this._newHosts = []
this._networks = new Map()
this._starCenters = new Map()
this._cleaners = []
this._objectsAdded = this._objectsAdded.bind(this)
this._objectsUpdated = this._objectsUpdated.bind(this)
this._overrideCerts = false
this._unsetApiMethod = null
}
// ---------------------------------------------------------------------------
async configure(configuration) {
this._overrideCerts = configuration['override-certs']
let certDirectory = configuration['cert-dir']
if (certDirectory == null) {
log.debug(`No cert-dir provided, using default self-signed certificates`)
certDirectory = await this._getDataDir()
if (!(await fileExists(join(certDirectory, CA_CERT)))) {
// If one certificate doesn't exist, none should
assert(
!(await fileExists(join(certDirectory, CLIENT_KEY))),
`${CLIENT_KEY} should not exist`
)
assert(
!(await fileExists(join(certDirectory, CLIENT_CERT))),
`${CLIENT_CERT} should not exist`
)
log.debug(`No default self-signed certificates exist, creating them`)
await this._generateCertificatesAndKey(certDirectory)
}
}
// TODO: verify certificates and create new certificates if needed
;[this._clientKey, this._clientCert, this._caCert] = await Promise.all([
fileRead(join(certDirectory, CLIENT_KEY)),
fileRead(join(certDirectory, CLIENT_CERT)),
fileRead(join(certDirectory, CA_CERT)),
])
this._ovsdbClients.forEach(client => {
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
})
const updatedPools = []
for (const poolNetwork of this._poolNetworks) {
if (updatedPools.includes(poolNetwork.pool)) {
continue
}
const xapi = this._xo.getXapi(poolNetwork.pool)
await this._installCaCertificateIfNeeded(xapi)
updatedPools.push(poolNetwork.pool)
}
}
async load() {
const createPrivateNetwork = this._createPrivateNetwork.bind(this)
createPrivateNetwork.description =
'Creates a pool-wide private network on a selected pool'
createPrivateNetwork.params = {
poolId: { type: 'string' },
networkName: { type: 'string' },
networkDescription: { type: 'string' },
encapsulation: { type: 'string' },
}
createPrivateNetwork.resolve = {
xoPool: ['poolId', 'pool', ''],
}
this._unsetApiMethod = this._xo.addApiMethod(
'plugin.SDNController.createPrivateNetwork',
createPrivateNetwork
)
// FIXME: we should monitor when xapis are added/removed
forOwn(this._xo.getAllXapis(), async xapi => {
await xapi.objectsFetched
if (this._setControllerNeeded(xapi) === false) {
this._cleaners.push(await this._manageXapi(xapi))
const hosts = filter(xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
this._createOvsdbClient(host)
})
)
// Add already existing pool-wide private networks
const networks = filter(xapi.objects.all, { $type: 'network' })
forOwn(networks, async network => {
if (network.other_config.private_pool_wide === 'true') {
log.debug(
`Adding network: '${network.name_label}' for pool: '${network.$pool.name_label}' to managed networks`
)
const center = await this._electNewCenter(network, true)
this._poolNetworks.push({
pool: network.$pool.$ref,
network: network.$ref,
starCenter: center?.$ref,
})
this._networks.set(network.$id, network.$ref)
if (center != null) {
this._starCenters.set(center.$id, center.$ref)
}
}
})
}
})
}
async unload() {
this._ovsdbClients = []
this._poolNetworks = []
this._newHosts = []
this._networks.clear()
this._starCenters.clear()
this._cleaners.forEach(cleaner => cleaner())
this._cleaners = []
this._unsetApiMethod()
}
// ===========================================================================
async _createPrivateNetwork({
xoPool,
networkName,
networkDescription,
encapsulation,
}) {
const pool = this._xo.getXapiObject(xoPool)
await this._setPoolControllerIfNeeded(pool)
// Create the private network
const privateNetworkRef = await pool.$xapi.call('network.create', {
name_label: networkName,
name_description: networkDescription,
MTU: 0,
other_config: {
automatic: 'false',
private_pool_wide: 'true',
encapsulation: encapsulation,
},
})
const privateNetwork = await pool.$xapi._getOrWaitObject(privateNetworkRef)
log.info(
`Private network '${privateNetwork.name_label}' has been created for pool '${pool.name_label}'`
)
// For each pool's host, create a tunnel to the private network
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
await this._createTunnel(host, privateNetwork)
this._createOvsdbClient(host)
})
)
const center = await this._electNewCenter(privateNetwork, false)
this._poolNetworks.push({
pool: pool.$ref,
network: privateNetwork.$ref,
starCenter: center?.$ref,
encapsulation: encapsulation,
})
this._networks.set(privateNetwork.$id, privateNetwork.$ref)
if (center != null) {
this._starCenters.set(center.$id, center.$ref)
}
}
// ---------------------------------------------------------------------------
async _manageXapi(xapi) {
const { objects } = xapi
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
objects.on('add', this._objectsAdded)
objects.on('update', this._objectsUpdated)
objects.on('remove', objectsRemovedXapi)
await this._installCaCertificateIfNeeded(xapi)
return () => {
objects.removeListener('add', this._objectsAdded)
objects.removeListener('update', this._objectsUpdated)
objects.removeListener('remove', objectsRemovedXapi)
}
}
async _objectsAdded(objects) {
await Promise.all(
map(objects, async object => {
const { $type } = object
if ($type === 'host') {
log.debug(
`New host: '${object.name_label}' in pool: '${object.$pool.name_label}'`
)
if (find(this._newHosts, { $ref: object.$ref }) == null) {
this._newHosts.push(object)
}
this._createOvsdbClient(object)
}
})
)
}
async _objectsUpdated(objects) {
await Promise.all(
map(objects, async (object, id) => {
const { $type } = object
if ($type === 'PIF') {
await this._pifUpdated(object)
} else if ($type === 'host') {
await this._hostUpdated(object)
}
})
)
}
async _objectsRemoved(xapi, objects) {
await Promise.all(
map(objects, async (object, id) => {
const client = find(this._ovsdbClients, { id: id })
if (client != null) {
this._ovsdbClients.splice(this._ovsdbClients.indexOf(client), 1)
}
// If a Star center host is removed: re-elect a new center where needed
const starCenterRef = this._starCenters.get(id)
if (starCenterRef != null) {
this._starCenters.delete(id)
const poolNetworks = filter(this._poolNetworks, {
starCenter: starCenterRef,
})
for (const poolNetwork of poolNetworks) {
const network = xapi.getObjectByRef(poolNetwork.network)
const newCenter = await this._electNewCenter(network, true)
poolNetwork.starCenter = newCenter?.$ref
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
}
return
}
// If a network is removed, clean this._poolNetworks from it
const networkRef = this._networks.get(id)
if (networkRef != null) {
this._networks.delete(id)
const poolNetwork = find(this._poolNetworks, {
network: networkRef,
})
if (poolNetwork != null) {
this._poolNetworks.splice(
this._poolNetworks.indexOf(poolNetwork),
1
)
}
}
})
)
}
async _pifUpdated(pif) {
// Only if PIF is in a private network
const poolNetwork = find(this._poolNetworks, { network: pif.network })
if (poolNetwork == null) {
return
}
if (!pif.currently_attached) {
if (poolNetwork.starCenter !== pif.host) {
return
}
log.debug(
`PIF: '${pif.device}' of network: '${pif.$network.name_label}' star-center host: '${pif.$host.name_label}' has been unplugged, electing a new host`
)
const newCenter = await this._electNewCenter(pif.$network, true)
poolNetwork.starCenter = newCenter?.$ref
this._starCenters.delete(pif.$host.$id)
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
} else {
if (poolNetwork.starCenter == null) {
const host = pif.$host
log.debug(
`First available host: '${host.name_label}' becomes star center of network: '${pif.$network.name_label}'`
)
poolNetwork.starCenter = pif.host
this._starCenters.set(host.$id, host.$ref)
}
log.debug(
`PIF: '${pif.device}' of network: '${pif.$network.name_label}' host: '${pif.$host.name_label}' has been plugged`
)
const starCenter = pif.$xapi.getObjectByRef(poolNetwork.starCenter)
await this._addHostToNetwork(pif.$host, pif.$network, starCenter)
}
}
async _hostUpdated(host) {
const xapi = host.$xapi
if (host.enabled) {
if (host.PIFs.length === 0) {
return
}
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
const newHost = find(this._newHosts, { $ref: host.$ref })
if (newHost != null) {
this._newHosts.splice(this._newHosts.indexOf(newHost), 1)
try {
await xapi.call('pool.certificate_sync')
} catch (error) {
log.error(
`Couldn't sync SDN controller ca certificate in pool: '${host.$pool.name_label}' because: ${error}`
)
}
}
for (const tunnel of tunnels) {
const accessPIF = xapi.getObjectByRef(tunnel.access_PIF)
if (accessPIF.host !== host.$ref) {
continue
}
const poolNetwork = find(this._poolNetworks, {
network: accessPIF.network,
})
if (poolNetwork == null) {
continue
}
if (accessPIF.currently_attached) {
continue
}
log.debug(
`Plugging PIF: '${accessPIF.device}' for host: '${host.name_label}' on network: '${accessPIF.$network.name_label}'`
)
try {
await xapi.call('PIF.plug', accessPIF.$ref)
} catch (error) {
log.error(
`XAPI error while plugging PIF: '${accessPIF.device}' on host: '${host.name_label}' for network: '${accessPIF.$network.name_label}'`
)
}
const starCenter = host.$xapi.getObjectByRef(poolNetwork.starCenter)
await this._addHostToNetwork(host, accessPIF.$network, starCenter)
}
} else {
const poolNetworks = filter(this._poolNetworks, { starCenter: host.$ref })
for (const poolNetwork of poolNetworks) {
const network = host.$xapi.getObjectByRef(poolNetwork.network)
log.debug(
`Star center host: '${host.name_label}' of network: '${network.name_label}' in pool: '${host.$pool.name_label}' is no longer reachable, electing a new host`
)
const newCenter = await this._electNewCenter(network, true)
poolNetwork.starCenter = newCenter?.$ref
this._starCenters.delete(host.$id)
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
}
}
}
// ---------------------------------------------------------------------------
async _setPoolControllerIfNeeded(pool) {
if (!this._setControllerNeeded(pool.$xapi)) {
// Nothing to do
return
}
const controller = find(pool.$xapi.objects.all, { $type: 'SDN_controller' })
if (controller != null) {
await pool.$xapi.call('SDN_controller.forget', controller.$ref)
log.debug(`Remove old SDN controller from pool: '${pool.name_label}'`)
}
await pool.$xapi.call('SDN_controller.introduce', PROTOCOL)
log.debug(`Set SDN controller of pool: '${pool.name_label}'`)
this._cleaners.push(await this._manageXapi(pool.$xapi))
}
_setControllerNeeded(xapi) {
const controller = find(xapi.objects.all, { $type: 'SDN_controller' })
return !(
controller != null &&
controller.protocol === PROTOCOL &&
controller.address === '' &&
controller.port === 0
)
}
// ---------------------------------------------------------------------------
async _installCaCertificateIfNeeded(xapi) {
let needInstall = false
try {
const result = await xapi.call('pool.certificate_list')
if (!result.includes(SDN_CONTROLLER_CERT)) {
needInstall = true
} else if (this._overrideCerts) {
await xapi.call('pool.certificate_uninstall', SDN_CONTROLLER_CERT)
log.debug(
`Old SDN Controller CA certificate uninstalled on pool: '${xapi.pool.name_label}'`
)
needInstall = true
}
} catch (error) {
log.error(
`Couldn't retrieve certificate list of pool: '${xapi.pool.name_label}'`
)
}
if (!needInstall) {
return
}
try {
await xapi.call(
'pool.certificate_install',
SDN_CONTROLLER_CERT,
this._caCert.toString()
)
await xapi.call('pool.certificate_sync')
log.debug(
`SDN controller CA certificate installed in pool: '${xapi.pool.name_label}'`
)
} catch (error) {
log.error(
`Couldn't install SDN controller CA certificate in pool: '${xapi.pool.name_label}' because: ${error}`
)
}
}
// ---------------------------------------------------------------------------
async _electNewCenter(network, resetNeeded) {
const pool = network.$pool
let newCenter = null
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
if (resetNeeded) {
// Clean old ports and interfaces
const hostClient = find(this._ovsdbClients, { host: host.$ref })
if (hostClient != null) {
try {
await hostClient.resetForNetwork(network.uuid, network.name_label)
} catch (error) {
log.error(
`Couldn't reset network: '${network.name_label}' for host: '${host.name_label}' in pool: '${network.$pool.name_label}' because: ${error}`
)
return
}
}
}
if (newCenter != null) {
return
}
const pif = find(host.$PIFs, { network: network.$ref })
if (pif != null && pif.currently_attached && host.enabled) {
newCenter = host
}
})
)
if (newCenter == null) {
log.error(
`Unable to elect a new star-center host to network: '${network.name_label}' for pool: '${network.$pool.name_label}' because there's no available host`
)
return null
}
// Recreate star topology
await Promise.all(
await map(hosts, async host => {
await this._addHostToNetwork(host, network, newCenter)
})
)
log.info(
`New star center host elected: '${newCenter.name_label}' in network: '${network.name_label}'`
)
return newCenter
}
async _createTunnel(host, network) {
const pif = host.$PIFs.find(
pif => pif.physical && pif.ip_configuration_mode !== 'None'
)
if (pif == null) {
log.error(
`No PIF found to create tunnel on host: '${host.name_label}' for network: '${network.name_label}'`
)
return
}
await host.$xapi.call('tunnel.create', pif.$ref, network.$ref)
log.debug(
`Tunnel added on host '${host.name_label}' for network '${network.name_label}'`
)
}
async _addHostToNetwork(host, network, starCenter) {
if (host.$ref === starCenter.$ref) {
// Nothing to do
return
}
const hostClient = find(this._ovsdbClients, {
host: host.$ref,
})
if (hostClient == null) {
log.error(`No OVSDB client found for host: '${host.name_label}'`)
return
}
const starCenterClient = find(this._ovsdbClients, {
host: starCenter.$ref,
})
if (starCenterClient == null) {
log.error(
`No OVSDB client found for star-center host: '${starCenter.name_label}'`
)
return
}
const encapsulation =
network.other_config.encapsulation != null
? network.other_config.encapsulation
: 'gre'
try {
await hostClient.addInterfaceAndPort(
network.uuid,
network.name_label,
starCenterClient.address,
encapsulation
)
await starCenterClient.addInterfaceAndPort(
network.uuid,
network.name_label,
hostClient.address,
encapsulation
)
} catch (error) {
log.error(
`Couldn't add host: '${host.name_label}' to network: '${network.name_label}' in pool: '${host.$pool.name_label}' because: ${error}`
)
}
}
// ---------------------------------------------------------------------------
_createOvsdbClient(host) {
const foundClient = find(this._ovsdbClients, { host: host.$ref })
if (foundClient != null) {
return foundClient
}
const client = new OvsdbClient(
host,
this._clientKey,
this._clientCert,
this._caCert
)
this._ovsdbClients.push(client)
return client
}
// ---------------------------------------------------------------------------
async _generateCertificatesAndKey(dataDir) {
const openssl = new NodeOpenssl()
const rsakeyoptions = {
rsa_keygen_bits: 4096,
format: 'PKCS8',
}
const subject = {
countryName: 'XX',
localityName: 'Default City',
organizationName: 'Default Company LTD',
}
const csroptions = {
hash: 'sha256',
startdate: new Date('1984-02-04 00:00:00'),
enddate: new Date('2143-06-04 04:16:23'),
subject: subject,
}
const cacsroptions = {
hash: 'sha256',
days: NB_DAYS,
subject: subject,
}
openssl.generateRSAPrivateKey(rsakeyoptions, (err, cakey, cmd) => {
if (err) {
log.error(`Error while generating CA private key: ${err}`)
return
}
openssl.generateCSR(cacsroptions, cakey, null, (err, csr, cmd) => {
if (err) {
log.error(`Error while generating CA certificate: ${err}`)
return
}
openssl.selfSignCSR(
csr,
cacsroptions,
cakey,
null,
async (err, cacrt, cmd) => {
if (err) {
log.error(`Error while signing CA certificate: ${err}`)
return
}
await fileWrite(join(dataDir, CA_CERT), cacrt)
openssl.generateRSAPrivateKey(
rsakeyoptions,
async (err, key, cmd) => {
if (err) {
log.error(`Error while generating private key: ${err}`)
return
}
await fileWrite(join(dataDir, CLIENT_KEY), key)
openssl.generateCSR(csroptions, key, null, (err, csr, cmd) => {
if (err) {
log.error(`Error while generating certificate: ${err}`)
return
}
openssl.CASignCSR(
csr,
cacsroptions,
false,
cacrt,
cakey,
null,
async (err, crt, cmd) => {
if (err) {
log.error(`Error while signing certificate: ${err}`)
return
}
await fileWrite(join(dataDir, CLIENT_CERT), crt)
this.emit('certWritten')
}
)
})
}
)
}
)
})
})
await fromEvent(this, 'certWritten', {})
log.debug('All certificates have been successfully written')
}
}
export default opts => new SDNController(opts)

View File

@@ -1,481 +0,0 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import forOwn from 'lodash/forOwn'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
const OVSDB_PORT = 6640
// =============================================================================
export class OvsdbClient {
constructor(host, clientKey, clientCert, caCert) {
this._host = host
this._numberOfPortAndInterface = 0
this._requestID = 0
this.updateCertificates(clientKey, clientCert, caCert)
log.debug(`[${this._host.name_label}] New OVSDB client`)
}
// ---------------------------------------------------------------------------
get address() {
return this._host.address
}
get host() {
return this._host.$ref
}
get id() {
return this._host.$id
}
updateCertificates(clientKey, clientCert, caCert) {
this._clientKey = clientKey
this._clientCert = clientCert
this._caCert = caCert
log.debug(`[${this._host.name_label}] Certificates have been updated`)
}
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
networkUuid,
networkName,
remoteAddress,
encapsulation
) {
const socket = await this._connect()
const index = this._numberOfPortAndInterface
++this._numberOfPortAndInterface
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid == null) {
socket.destroy()
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
return
}
const interfaceName = 'tunnel_iface' + index
const portName = 'tunnel_port' + index
// Add interface and port to the bridge
const options = ['map', [['remote_ip', remoteAddress]]]
const addInterfaceOperation = {
op: 'insert',
table: 'Interface',
row: {
type: encapsulation,
options: options,
name: interfaceName,
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_iface',
}
const addPortOperation = {
op: 'insert',
table: 'Port',
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_port',
}
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
'Open_vSwitch',
addInterfaceOperation,
addPortOperation,
mutateBridgeOperation,
]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
socket.destroy()
return
}
let error
let details
let i = 0
let opResult
do {
opResult = jsonObjects[0].result[i]
if (opResult != null && opResult.error != null) {
error = opResult.error
details = opResult.details
}
++i
} while (opResult && !error)
if (error != null) {
log.error(
`[${this._host.name_label}] Error while adding port: '${portName}' and interface: '${interfaceName}' to bridge: '${bridgeName}' on network: '${networkName}' because: ${error}: ${details}`
)
socket.destroy()
return
}
log.debug(
`[${this._host.name_label}] Port: '${portName}' and interface: '${interfaceName}' added to bridge: '${bridgeName}' on network: '${networkName}'`
)
socket.destroy()
}
async resetForNetwork(networkUuid, networkName) {
const socket = await this._connect()
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid == null) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports == null) {
socket.destroy()
return
}
const portsToDelete = []
for (const port of ports) {
const portUuid = port[1]
const where = [['_uuid', '==', ['uuid', portUuid]]]
const selectResult = await this._select(
'Port',
['name', 'other_config'],
where,
socket
)
if (selectResult == null) {
continue
}
forOwn(selectResult.other_config[1], config => {
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
log.debug(
`[${this._host.name_label}] Adding port: '${selectResult.name}' to delete list from bridge: '${bridgeName}'`
)
portsToDelete.push(['uuid', portUuid])
}
})
}
if (portsToDelete.length === 0) {
// Nothing to do
socket.destroy()
return
}
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
const params = ['Open_vSwitch', mutateBridgeOperation]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
socket.destroy()
return
}
if (jsonObjects[0].error != null) {
log.error(
`[${this._host.name_label}] Couldn't delete ports from bridge: '${bridgeName}' because: ${jsonObjects.error}`
)
socket.destroy()
return
}
log.debug(
`[${this._host.name_label}] Deleted ${jsonObjects[0].result[0].count} ports from bridge: '${bridgeName}'`
)
socket.destroy()
}
// ===========================================================================
_parseJson(chunk) {
let data = chunk.toString()
let buffer = ''
let depth = 0
let pos = 0
const objects = []
for (let i = pos; i < data.length; ++i) {
const c = data.charAt(i)
if (c === '{') {
depth++
} else if (c === '}') {
depth--
if (depth === 0) {
const object = JSON.parse(buffer + data.substr(0, i + 1))
objects.push(object)
buffer = ''
data = data.substr(i + 1)
pos = 0
i = -1
}
}
}
buffer += data
return objects
}
// ---------------------------------------------------------------------------
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
[
'external_ids',
'includes',
['map', [['xs-network-uuids', networkUuid]]],
],
]
const selectResult = await this._select(
'Bridge',
['_uuid', 'name'],
where,
socket
)
if (selectResult == null) {
return [null, null]
}
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
log.debug(
`[${this._host.name_label}] Found bridge: '${bridgeName}' for network: '${networkName}'`
)
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports == null) {
return
}
for (const port of ports) {
const portUuid = port[1]
const interfaces = await this._getPortInterfaces(portUuid, socket)
if (interfaces == null) {
continue
}
for (const iface of interfaces) {
const interfaceUuid = iface[1]
const hasRemote = await this._interfaceHasRemote(
interfaceUuid,
remoteAddress,
socket
)
if (hasRemote === true) {
return true
}
}
}
return false
}
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult == null) {
return null
}
return selectResult.ports[0] === 'set'
? selectResult.ports[1]
: [selectResult.ports]
}
async _getPortInterfaces(portUuid, socket) {
const where = [['_uuid', '==', ['uuid', portUuid]]]
const selectResult = await this._select(
'Port',
['name', 'interfaces'],
where,
socket
)
if (selectResult == null) {
return null
}
return selectResult.interfaces[0] === 'set'
? selectResult.interfaces[1]
: [selectResult.interfaces]
}
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
const where = [['_uuid', '==', ['uuid', interfaceUuid]]]
const selectResult = await this._select(
'Interface',
['name', 'options'],
where,
socket
)
if (selectResult == null) {
return false
}
for (const option of selectResult.options[1]) {
if (option[0] === 'remote_ip' && option[1] === remoteAddress) {
return true
}
}
return false
}
// ---------------------------------------------------------------------------
async _select(table, columns, where, socket) {
const selectOperation = {
op: 'select',
table: table,
columns: columns,
where: where,
}
const params = ['Open_vSwitch', selectOperation]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
return
}
const jsonResult = jsonObjects[0].result[0]
if (jsonResult.error != null) {
log.error(
`[${this._host.name_label}] Couldn't retrieve: '${columns}' in: '${table}' because: ${jsonResult.error}: ${jsonResult.details}`
)
return null
}
if (jsonResult.rows.length === 0) {
log.error(
`[${this._host.name_label}] No '${columns}' found in: '${table}' where: '${where}'`
)
return null
}
// For now all select operations should return only 1 row
assert(
jsonResult.rows.length === 1,
`[${this._host.name_label}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
)
return jsonResult.rows[0]
}
async _sendOvsdbTransaction(params, socket) {
const stream = socket
const requestId = this._requestID
++this._requestID
const req = {
id: requestId,
method: 'transact',
params: params,
}
try {
stream.write(JSON.stringify(req))
} catch (error) {
log.error(
`[${this._host.name_label}] Error while writing into stream: ${error}`
)
return null
}
let result
let jsonObjects
let resultRequestId
do {
try {
result = await fromEvent(stream, 'data', {})
} catch (error) {
log.error(
`[${this._host.name_label}] Error while waiting for stream data: ${error}`
)
return null
}
jsonObjects = this._parseJson(result)
resultRequestId = jsonObjects[0].id
} while (resultRequestId !== requestId)
return jsonObjects
}
// ---------------------------------------------------------------------------
async _connect() {
const options = {
ca: this._caCert,
key: this._clientKey,
cert: this._clientCert,
host: this._host.address,
port: OVSDB_PORT,
rejectUnauthorized: false,
requestCert: false,
}
const socket = connect(options)
try {
await fromEvent(socket, 'secureConnect', {})
} catch (error) {
log.error(
`[${this._host.name_label}] TLS connection failed because: ${error}: ${error.code}`
)
throw error
}
log.debug(`[${this._host.name_label}] TLS connection successful`)
socket.on('error', error => {
log.error(
`[${this._host.name_label}] OVSDB client socket error: ${error} with code: ${error.code}`
)
})
return socket
}
}
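
For orientation, the class above speaks raw OVSDB JSON-RPC: every operation is a 'transact' request against the 'Open_vSwitch' database sent over a TLS socket, and replies are matched to requests by their 'id'. A minimal, self-contained sketch of that exchange (the port, the disabled certificate checks and the single-reply assumption are illustrative, not values taken from this plugin):

import { connect } from 'tls'
import { fromEvent } from 'promise-toolbox'

// Sketch only: issue one OVSDB 'select' and return its rows.
async function listBridges(host, port = 6640) {
  const socket = connect({ host, port, rejectUnauthorized: false })
  await fromEvent(socket, 'secureConnect')

  const request = {
    id: 0,
    method: 'transact',
    params: [
      'Open_vSwitch',
      { op: 'select', table: 'Bridge', columns: ['_uuid', 'name'], where: [] },
    ],
  }
  socket.write(JSON.stringify(request))

  // A real client must buffer partial chunks and match ids, as _parseJson()
  // and _sendOvsdbTransaction() do above; this assumes one complete reply.
  const reply = JSON.parse((await fromEvent(socket, 'data')).toString())
  socket.end()
  return reply.result[0].rows
}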

View File

@@ -32,9 +32,9 @@
"node": ">=6"
},
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer": "^5.0.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,6 +50,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -49,6 +49,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"slack-node": "^0.1.8"
},
"devDependencies": {
@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -39,10 +39,10 @@
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/log": "^0.1.4",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"html-minifier": "^3.5.8",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.13.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -59,6 +59,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -29,9 +29,6 @@ guessVhdSizeOnImport = false
# be turned for investigation by the administrator.
verboseApiLogsOnErrors = false
# if no events could be fetched during this delay, the server will be marked as disconnected
xapiMarkDisconnectedDelay = '5 minutes'
# https://github.com/websockets/ws#websocket-compression
[apiWebSocketOptions]
perMessageDeflate = { threshold = 524288 } # 512kiB
@@ -52,17 +49,6 @@ maxTokenValidity = '0.5 year'
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'
# Duration for which we can wait for the backup size before returning
#
# It should be short to avoid blocking the display of the available backups.
vmBackupSizeTimeout = '2 seconds'
# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/
#[http.helmet.hsts]
#includeSubDomains = false
[[http.listen]]
port = 80
@@ -82,7 +68,6 @@ honorCipherOrder = true
secureOptions = 117440512
[http.mounts]
'/' = '../xo-web/dist'
[remoteOptions]
mountsDir = '/run/xo-server/mounts'

View File

@@ -1,7 +1,6 @@
{
"private": true,
"name": "xo-server",
"version": "5.44.0",
"version": "5.38.1",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -38,11 +37,11 @@
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.10.0",
"@xen-orchestra/fs": "^0.8.0",
"@xen-orchestra/log": "^0.1.4",
"@xen-orchestra/mixin": "^0.0.0",
"ajv": "^6.1.1",
"app-conf": "^0.7.0",
"app-conf": "^0.6.1",
"archiver": "^3.0.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
@@ -51,7 +50,7 @@
"body-parser": "^1.18.2",
"compression": "^1.7.3",
"connect-flash": "^0.1.1",
"cookie": "^0.4.0",
"cookie": "^0.3.1",
"cookie-parser": "^1.4.3",
"d3-time-format": "^2.1.1",
"debug": "^4.0.1",
@@ -65,7 +64,7 @@
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.7.1",
@@ -102,7 +101,7 @@
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^24.0.0",
"promise-toolbox": "^0.13.0",
"promise-toolbox": "^0.12.1",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pump": "^3.0.0",
@@ -110,7 +109,7 @@
"readable-stream": "^3.2.0",
"redis": "^2.8.0",
"schema-inspector": "^1.6.8",
"semver": "^6.0.0",
"semver": "^5.4.1",
"serve-static": "^1.13.1",
"split-lines": "^2.0.0",
"stack-chain": "^2.0.0",
@@ -118,18 +117,18 @@
"struct-fu": "^1.2.0",
"tar-stream": "^2.0.1",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"tmp": "^0.0.33",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.7.0",
"vhd-lib": "^0.6.0",
"ws": "^6.0.0",
"xen-api": "^0.26.0",
"xen-api": "^0.25.0",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-remote-parser": "^0.5.0",
"xo-vmdk-to-vhd": "^0.1.7",
"xo-vmdk-to-vhd": "^0.1.6",
"yazl": "^2.4.3"
},
"devDependencies": {

View File

@@ -117,7 +117,7 @@ port = 80
# List of files/directories which will be served.
[http.mounts]
#'/any/url' = '/path/to/directory'
#'/' = '/path/to/xo-web/dist/'
# List of proxied URLs (HTTP & WebSockets).
[http.proxies]

View File

@@ -1,5 +0,0 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { execFile } from 'child_process'
export const read = key =>
fromCallback(cb => execFile('xenstore-read', [key], cb))
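
The five-line helper above simply shells out to xenstore-read and resolves with its stdout; a caller would use it roughly like this (the key and the trim are illustrative, not taken from this repository):

// Hypothetical caller, with `read` imported from the helper above.
async function readDomainId() {
  // `xenstore-read domid` prints the domain id followed by a newline.
  return (await read('domid')).trim()
}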

View File

@@ -123,14 +123,10 @@ getJob.params = {
export async function runJob({
id,
schedule,
settings,
vm,
vms = vm !== undefined ? [vm] : undefined,
}) {
return this.runJobSequence([id], await this.getSchedule(schedule), {
settings,
vms,
})
return this.runJobSequence([id], await this.getSchedule(schedule), vms)
}
runJob.permission = 'admin'
@@ -142,13 +138,6 @@ runJob.params = {
schedule: {
type: 'string',
},
settings: {
type: 'object',
properties: {
'*': { type: 'object' },
},
optional: true,
},
vm: {
type: 'string',
optional: true,
@@ -194,7 +183,6 @@ getLogs.params = {
after: { type: ['number', 'string'], optional: true },
before: { type: ['number', 'string'], optional: true },
limit: { type: 'number', optional: true },
'*': { type: 'any' },
}
// -----------------------------------------------------------------------------

View File

@@ -1,6 +1,6 @@
import createLogger from '@xen-orchestra/log'
import pump from 'pump'
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors'
import { parseSize } from '../utils'
@@ -128,7 +128,7 @@ async function handleImportContent(req, res, { xapi, id }) {
res.end(format.response(0, true))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}

View File

@@ -1,22 +1,26 @@
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
// ===================================================================
export async function set({
host,
multipathing,
// TODO: use camel case.
name_label: nameLabel,
name_description: nameDescription,
}) {
host = this.getXapiObject(host)
const xapi = this.getXapi(host)
const hostId = host._xapiId
await Promise.all([
nameDescription !== undefined && host.set_name_description(nameDescription),
nameLabel !== undefined && host.set_name_label(nameLabel),
multipathing !== undefined &&
host.$xapi.setHostMultipathing(host.$id, multipathing),
])
if (multipathing !== undefined) {
await xapi.setHostMultipathing(hostId, multipathing)
}
return xapi.setHostProperties(hostId, {
nameLabel,
nameDescription,
})
}
set.description = 'changes the properties of a host'
@@ -211,25 +215,6 @@ emergencyShutdownHost.resolve = {
// -------------------------------------------------------------------
export async function isHostServerTimeConsistent({ host }) {
try {
await this.getXapi(host).assertConsistentHostServerTime(host._xapiRef)
return true
} catch (e) {
return false
}
}
isHostServerTimeConsistent.params = {
host: { type: 'string' },
}
isHostServerTimeConsistent.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export function stats({ host, granularity }) {
return this.getXapiHostStats(host._xapiId, granularity)
}
@@ -263,7 +248,7 @@ async function handleInstallSupplementalPack(req, res, { hostId }) {
res.end(format.response(0))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}
@@ -284,19 +269,3 @@ installSupplementalPack.params = {
installSupplementalPack.resolve = {
host: ['host', 'host', 'admin'],
}
// -------------------------------------------------------------------
export function isHyperThreadingEnabled({ host }) {
return this.getXapi(host).isHyperThreadingEnabled(host._xapiId)
}
isHyperThreadingEnabled.description = 'get hyper-threading information'
isHyperThreadingEnabled.params = {
id: { type: 'string' },
}
isHyperThreadingEnabled.resolve = {
host: ['id', 'host', 'administrate'],
}
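
This hunk, like several below it (networks, pools, SRs, VBDs, tags), trades per-type helpers such as setHostProperties for direct calls on the record returned by getXapiObject: methods like set_name_label are assumed to map one-to-one onto the matching XAPI RPCs, $xapi and $id point back to the owning connection and object, and the `value !== undefined && record.set_value(...)` entries let Promise.all skip fields that were not passed (a false entry resolves immediately). A rough sketch of the idiom with invented names:

// Sketch only: apply optional edits to a XAPI record in the proxy style used
// above; 'my_flag' is a made-up other_config key (a null value removes the
// key, as detachPci() does below for 'pci').
async function editRecord(record, { nameLabel, flag }) {
  await Promise.all([
    nameLabel !== undefined && record.set_name_label(nameLabel),
    flag !== undefined &&
      record.update_other_config('my_flag', flag ? 'true' : null),
  ])
}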

View File

@@ -85,26 +85,18 @@ createBonded.description =
// ===================================================================
export async function set({
network,
automatic,
defaultIsLocked,
name_description: nameDescription,
name_label: nameLabel,
network,
}) {
network = this.getXapiObject(network)
await Promise.all([
automatic !== undefined &&
network.update_other_config('automatic', automatic ? 'true' : null),
defaultIsLocked !== undefined &&
network.set_default_locking_mode(
defaultIsLocked ? 'disabled' : 'unlocked'
),
nameDescription !== undefined &&
network.set_name_description(nameDescription),
nameLabel !== undefined && network.set_name_label(nameLabel),
])
await this.getXapi(network).setNetworkProperties(network._xapiId, {
automatic,
defaultIsLocked,
nameDescription,
nameLabel,
})
}
set.params = {

View File

@@ -5,7 +5,7 @@
async function delete_({ PBD }) {
// TODO: check if PBD is attached before
await this.getXapi(PBD).callAsync('PBD.destroy', PBD._xapiRef)
await this.getXapi(PBD).call('PBD.destroy', PBD._xapiRef)
}
export { delete_ as delete }
@@ -37,7 +37,7 @@ disconnect.resolve = {
export async function connect({ PBD }) {
// TODO: check if PBD is attached before
await this.getXapi(PBD).callAsync('PBD.plug', PBD._xapiRef)
await this.getXapi(PBD).call('PBD.plug', PBD._xapiRef)
}
connect.params = {

View File

@@ -1,7 +1,5 @@
// TODO: too low level, move into host.
import { filter, find } from 'lodash'
import { IPV4_CONFIG_MODES, IPV6_CONFIG_MODES } from '../xapi'
export function getIpv4ConfigurationModes() {
@@ -17,17 +15,7 @@ export function getIpv6ConfigurationModes() {
async function delete_({ pif }) {
// TODO: check if PIF is attached before
const xapi = this.getXapi(pif)
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
const tunnel = find(tunnels, { access_PIF: pif._xapiRef })
if (tunnel != null) {
await xapi.callAsync('PIF.unplug', pif._xapiRef)
await xapi.callAsync('tunnel.destroy', tunnel.$ref)
return
}
await xapi.callAsync('PIF.destroy', pif._xapiRef)
await this.getXapi(pif).call('PIF.destroy', pif._xapiRef)
}
export { delete_ as delete }
@@ -44,7 +32,7 @@ delete_.resolve = {
export async function disconnect({ pif }) {
// TODO: check if PIF is attached before
await this.getXapi(pif).callAsync('PIF.unplug', pif._xapiRef)
await this.getXapi(pif).call('PIF.unplug', pif._xapiRef)
}
disconnect.params = {
@@ -59,7 +47,7 @@ disconnect.resolve = {
export async function connect({ pif }) {
// TODO: check if PIF is attached before
await this.getXapi(pif).callAsync('PIF.plug', pif._xapiRef)
await this.getXapi(pif).call('PIF.plug', pif._xapiRef)
}
connect.params = {

View File

@@ -1,19 +1,18 @@
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
// ===================================================================
export async function set({
pool,
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel,
}) {
pool = this.getXapiObject(pool)
await Promise.all([
nameDescription !== undefined && pool.set_name_description(nameDescription),
nameLabel !== undefined && pool.set_name_label(nameLabel),
])
await this.getXapi(pool).setPoolProperties({
nameDescription,
nameLabel,
})
}
set.params = {
@@ -168,7 +167,9 @@ export async function mergeInto({ source, target, force }) {
if (sourceHost.productBrand !== targetHost.productBrand) {
throw new Error(
`a ${sourceHost.productBrand} pool cannot be merged into a ${targetHost.productBrand} pool`
`a ${sourceHost.productBrand} pool cannot be merged into a ${
targetHost.productBrand
} pool`
)
}
@@ -233,7 +234,7 @@ async function handleInstallSupplementalPack(req, res, { poolId }) {
res.end(format.response(0))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}

View File

@@ -100,24 +100,20 @@ set.params = {
optional: true,
type: 'boolean',
},
readOnly: {
optional: true,
type: 'boolean',
},
}
// -------------------------------------------------------------------
export async function enable({ id }) {
export async function connect({ id }) {
this.updateXenServer(id, { enabled: true })::ignoreErrors()
await this.connectXenServer(id)
}
enable.description = 'enable a Xen server'
connect.description = 'connect a Xen server'
enable.permission = 'admin'
connect.permission = 'admin'
enable.params = {
connect.params = {
id: {
type: 'string',
},
@@ -125,16 +121,16 @@ enable.params = {
// -------------------------------------------------------------------
export async function disable({ id }) {
export async function disconnect({ id }) {
this.updateXenServer(id, { enabled: false })::ignoreErrors()
await this.disconnectXenServer(id)
}
disable.description = 'disable a Xen server'
disconnect.description = 'disconnect a Xen server'
disable.permission = 'admin'
disconnect.permission = 'admin'
disable.params = {
disconnect.params = {
id: {
type: 'string',
},

View File

@@ -10,15 +10,14 @@ import { forEach, parseXml } from '../utils'
export async function set({
sr,
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel,
}) {
sr = this.getXapiObject(sr)
await Promise.all([
nameDescription !== undefined && sr.set_name_description(nameDescription),
nameLabel !== undefined && sr.set_name_label(nameLabel),
])
await this.getXapi(sr).setSrProperties(sr._xapiId, {
nameDescription,
nameLabel,
})
}
set.params = {
@@ -36,7 +35,7 @@ set.resolve = {
// -------------------------------------------------------------------
export async function scan({ SR }) {
await this.getXapi(SR).callAsync('SR.scan', SR._xapiRef)
await this.getXapi(SR).call('SR.scan', SR._xapiRef)
}
scan.params = {
@@ -180,35 +179,6 @@ createIso.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export async function createFile({
host,
nameLabel,
nameDescription,
location,
}) {
const xapi = this.getXapi(host)
return xapi.createSr({
hostRef: host._xapiRef,
name_label: nameLabel,
name_description: nameDescription,
type: 'file',
device_config: { location },
})
}
createFile.params = {
host: { type: 'string' },
nameLabel: { type: 'string' },
nameDescription: { type: 'string' },
location: { type: 'string' },
}
createFile.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// NFS SR
@@ -391,58 +361,6 @@ createExt.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect all ZFS pools
// Return a dict of pools with their parameters { <poolname>: {<paramdict>}}
// example output (the parameter mountpoint is of interest):
// {"tank":
// {
// "setuid": "on", "relatime": "off", "referenced": "24K", "written": "24K", "zoned": "off", "primarycache": "all",
// "logbias": "latency", "creation": "Mon May 27 17:24 2019", "sync": "standard", "snapdev": "hidden",
// "dedup": "off", "sharenfs": "off", "usedbyrefreservation": "0B", "sharesmb": "off", "createtxg": "1",
// "canmount": "on", "mountpoint": "/tank", "casesensitivity": "sensitive", "utf8only": "off", "xattr": "on",
// "dnodesize": "legacy", "mlslabel": "none", "objsetid": "54", "defcontext": "none", "rootcontext": "none",
// "mounted": "yes", "compression": "off", "overlay": "off", "logicalused": "47K", "usedbysnapshots": "0B",
// "filesystem_count": "none", "copies": "1", "snapshot_limit": "none", "aclinherit": "restricted",
// "compressratio": "1.00x", "readonly": "off", "version": "5", "normalization": "none", "filesystem_limit": "none",
// "type": "filesystem", "secondarycache": "all", "refreservation": "none", "available": "17.4G", "used": "129K",
// "exec": "on", "refquota": "none", "refcompressratio": "1.00x", "quota": "none", "keylocation": "none",
// "snapshot_count": "none", "fscontext": "none", "vscan": "off", "reservation": "none", "atime": "on",
// "recordsize": "128K", "usedbychildren": "105K", "usedbydataset": "24K", "guid": "656061077639704004",
// "pbkdf2iters": "0", "checksum": "on", "special_small_blocks": "0", "redundant_metadata": "all",
// "volmode": "default", "devices": "on", "keyformat": "none", "logicalreferenced": "12K", "acltype": "off",
// "nbmand": "off", "context": "none", "encryption": "off", "snapdir": "hidden"}}
export async function probeZfs({ host }) {
const xapi = this.getXapi(host)
try {
const result = await xapi.call(
'host.call_plugin',
host._xapiRef,
'zfs.py',
'list_zfs_pools',
{}
)
return JSON.parse(result)
} catch (error) {
if (
error.code === 'XENAPI_MISSING_PLUGIN' ||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
) {
return {}
} else {
throw error
}
}
}
probeZfs.params = {
host: { type: 'string' },
}
probeZfs.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect all NFS shares (exports) on a NFS server
// Return a table of exports with their paths and ACLs
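
Stepping back to the ZFS probe above: its comment block documents the shape returned by the zfs.py host plugin, a map of pool names to property dictionaries in which 'mountpoint' (and 'mounted') are the values a caller actually needs. A hypothetical consumer of probeZfs()'s result, sketched only from that example output:

// Illustrative helper: keep the mounted pools and return their mountpoints.
function listZfsMountpoints(pools) {
  // pools is e.g. { tank: { mountpoint: '/tank', mounted: 'yes', ... } }
  return Object.keys(pools)
    .filter(name => pools[name].mounted === 'yes')
    .map(name => ({ name, mountpoint: pools[name].mountpoint }))
}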

View File

@@ -1,5 +1,5 @@
export async function add({ tag, object }) {
await this.getXapiObject(object).add_tags(tag)
await this.getXapi(object).addTag(object._xapiId, tag)
}
add.description = 'add a new tag to an object'
@@ -16,7 +16,7 @@ add.params = {
// -------------------------------------------------------------------
export async function remove({ tag, object }) {
await this.getXapiObject(object).remove_tags(tag)
await this.getXapi(object).removeTag(object._xapiId, tag)
}
remove.description = 'remove an existing tag from an object'

View File

@@ -1,6 +1,3 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
export function getPermissionsForUser({ userId }) {
return this.getPermissionsForUser(userId)
}
@@ -89,35 +86,3 @@ copyVm.resolve = {
vm: ['vm', 'VM'],
sr: ['sr', 'SR'],
}
// -------------------------------------------------------------------
export async function changeConnectedXapiHostname({
hostname,
newObject,
oldObject,
}) {
const xapi = this.getXapi(oldObject)
const { pool: currentPool } = xapi
xapi._setUrl({ ...xapi._url, hostname })
await fromEvent(xapi.objects, 'finish')
if (xapi.pool.$id === currentPool.$id) {
await fromEvent(xapi.objects, 'finish')
}
assert(xapi.pool.$id !== currentPool.$id)
assert.doesNotThrow(() => this.getXapi(newObject))
assert.throws(() => this.getXapi(oldObject))
}
changeConnectedXapiHostname.description =
'change the connected XAPI hostname and check if the pool and the local cache are updated'
changeConnectedXapiHostname.permission = 'admin'
changeConnectedXapiHostname.params = {
hostname: { type: 'string' },
newObject: { type: 'string', description: "new connection's XO object" },
oldObject: { type: 'string', description: "current connection's XO object" },
}

View File

@@ -34,25 +34,3 @@ delete_.permission = 'admin'
delete_.params = {
token: { type: 'string' },
}
// -------------------------------------------------------------------
export async function deleteAll({ except }) {
await this.deleteAuthenticationTokens({
filter: {
user_id: this.session.get('user_id'),
id: {
__not: except,
},
},
})
}
deleteAll.description =
'delete all tokens of the current user except the current one'
deleteAll.permission = ''
deleteAll.params = {
except: { type: 'string', optional: true },
}

View File

@@ -48,7 +48,8 @@ connect.resolve = {
export async function set({ position, vbd }) {
if (position !== undefined) {
await this.getXapiObject(vbd).set_userdevice(String(position))
const xapi = this.getXapi(vbd)
await xapi.call('VBD.set_userdevice', vbd._xapiRef, String(position))
}
}
@@ -66,7 +67,9 @@ set.resolve = {
// -------------------------------------------------------------------
export async function setBootable({ vbd, bootable }) {
await this.getXapiObject(vbd).set_bootable(bootable)
const xapi = this.getXapi(vbd)
await xapi.call('VBD.set_bootable', vbd._xapiRef, bootable)
}
setBootable.params = {

View File

@@ -64,7 +64,6 @@ export async function set({
allowedIpv4Addresses,
allowedIpv6Addresses,
attached,
rateLimit,
}) {
const oldIpAddresses = vif.allowedIpv4Addresses.concat(
vif.allowedIpv6Addresses
@@ -92,9 +91,6 @@ export async function set({
mac,
currently_attached: attached,
ipv4_allowed: newIpAddresses,
qos_algorithm_type: rateLimit != null ? 'ratelimit' : undefined,
qos_algorithm_params:
rateLimit != null ? { kbps: String(rateLimit) } : undefined,
})
await this.allocIpAddresses(newVif.$id, newIpAddresses)
@@ -111,7 +107,6 @@ export async function set({
return this.getXapi(vif).editVif(vif._xapiId, {
ipv4Allowed: allowedIpv4Addresses,
ipv6Allowed: allowedIpv6Addresses,
rateLimit,
})
}
@@ -134,11 +129,6 @@ set.params = {
optional: true,
},
attached: { type: 'boolean', optional: true },
rateLimit: {
description: 'in kilobytes per seconds',
optional: true,
type: ['number', 'null'],
},
}
set.resolve = {

View File

@@ -1,5 +1,5 @@
import defer from 'golike-defer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
import { ignoreErrors } from 'promise-toolbox'
import { assignWith, concat } from 'lodash'
import {
@@ -193,11 +193,6 @@ create.params = {
optional: true,
},
networkConfig: {
type: 'string',
optional: true,
},
coreOs: {
type: 'boolean',
optional: true,
@@ -320,11 +315,6 @@ create.params = {
},
},
},
hvmBootFirmware: { type: 'string', optional: true },
// other params are passed to `editVm`
'*': { type: 'any' },
}
create.resolve = {
@@ -565,8 +555,6 @@ set.params = {
// Identifier of the VM to update.
id: { type: 'string' },
auto_poweron: { type: 'boolean', optional: true },
name_label: { type: 'string', optional: true },
name_description: { type: 'string', optional: true },
@@ -610,7 +598,7 @@ set.params = {
// Switch from Cirrus video adaptor to VGA adaptor
vga: { type: 'string', optional: true },
videoram: { type: 'number', optional: true },
videoram: { type: ['string', 'number'], optional: true },
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
@@ -628,9 +616,6 @@ set.params = {
// set the VM network interface controller
nicType: { type: ['string', 'null'], optional: true },
// set the VM boot firmware mode
hvmBootFirmware: { type: ['string', 'null'], optional: true },
}
set.resolve = {
@@ -640,7 +625,13 @@ set.resolve = {
// -------------------------------------------------------------------
export async function restart({ vm, force = false }) {
return this.getXapi(vm).rebootVm(vm._xapiId, { hard: force })
const xapi = this.getXapi(vm)
if (force) {
await xapi.call('VM.hard_reboot', vm._xapiRef)
} else {
await xapi.call('VM.clean_reboot', vm._xapiRef)
}
}
restart.params = {
@@ -741,7 +732,7 @@ export async function convertToTemplate({ vm }) {
// Convert to a template requires pool admin permission.
await this.checkPermissions(this.user.id, [[vm.$pool, 'administrate']])
await this.getXapiObject(vm).set_is_a_template(true)
await this.getXapi(vm).call('VM.set_is_a_template', vm._xapiRef, true)
}
convertToTemplate.params = {
@@ -1093,7 +1084,7 @@ stop.resolve = {
// -------------------------------------------------------------------
export async function suspend({ vm }) {
await this.getXapi(vm).callAsync('VM.suspend', vm._xapiRef)
await this.getXapi(vm).call('VM.suspend', vm._xapiRef)
}
suspend.params = {
@@ -1107,7 +1098,7 @@ suspend.resolve = {
// -------------------------------------------------------------------
export async function pause({ vm }) {
await this.getXapi(vm).callAsync('VM.pause', vm._xapiRef)
await this.getXapi(vm).call('VM.pause', vm._xapiRef)
}
pause.params = {
@@ -1134,10 +1125,7 @@ resume.resolve = {
// -------------------------------------------------------------------
export async function revert({ snapshot, snapshotBefore }) {
await this.checkPermissions(this.user.id, [
[snapshot.$snapshot_of, 'operate'],
])
export function revert({ snapshot, snapshotBefore }) {
return this.getXapi(snapshot).revertVm(snapshot._xapiId, snapshotBefore)
}
@@ -1147,7 +1135,7 @@ revert.params = {
}
revert.resolve = {
snapshot: ['snapshot', 'VM-snapshot', 'view'],
snapshot: ['snapshot', 'VM-snapshot', 'administrate'],
}
// -------------------------------------------------------------------
@@ -1210,7 +1198,7 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
res.end(format.response(0, vm.$id))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}
@@ -1373,7 +1361,9 @@ createInterface.resolve = {
// -------------------------------------------------------------------
export async function attachPci({ vm, pciId }) {
await this.getXapiObject(vm).update_other_config('pci', pciId)
const xapi = this.getXapi(vm)
await xapi.call('VM.add_to_other_config', vm._xapiRef, 'pci', pciId)
}
attachPci.params = {
@@ -1388,7 +1378,9 @@ attachPci.resolve = {
// -------------------------------------------------------------------
export async function detachPci({ vm }) {
await this.getXapiObject(vm).update_other_config('pci', null)
const xapi = this.getXapi(vm)
await xapi.call('VM.remove_from_other_config', vm._xapiRef, 'pci')
}
detachPci.params = {
@@ -1421,11 +1413,15 @@ stats.resolve = {
// -------------------------------------------------------------------
export async function setBootOrder({ vm, order }) {
if (vm.virtualizationMode !== 'hvm') {
throw invalidParameters('You can only set the boot order on a HVM guest')
const xapi = this.getXapi(vm)
order = { order }
if (vm.virtualizationMode === 'hvm') {
await xapi.call('VM.set_HVM_boot_params', vm._xapiRef, order)
return
}
await this.getXapiObject(vm).update_HVM_boot_params('order', order)
throw invalidParameters('You can only set the boot order on a HVM guest')
}
setBootOrder.params = {

View File

@@ -55,7 +55,6 @@ getAllObjects.description = 'Returns all XO objects'
getAllObjects.params = {
filter: { type: 'object', optional: true },
limit: { type: 'number', optional: true },
ndjson: { type: 'boolean', optional: true },
}
// -------------------------------------------------------------------

View File

@@ -269,10 +269,10 @@ export async function fixHostNotInNetwork({ xosanSr, host }) {
if (pif) {
const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
reconfigurePifIP(xapi, pif, newIP)
await xapi.callAsync('PIF.plug', pif.$ref)
await xapi.call('PIF.plug', pif.$ref)
const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
if (PBD) {
await xapi.callAsync('PBD.plug', PBD.$ref)
await xapi.call('PBD.plug', PBD.$ref)
}
const sshKey = await getOrCreateSshKey(xapi)
await callPlugin(xapi, host, 'receive_ssh_keys', {
@@ -446,7 +446,9 @@ const createNetworkAndInsertHosts = defer(async function(
})
if (result.exit !== 0) {
throw invalidParameters(
`Could not ping ${master.name_label}->${address.pif.$host.name_label} (${address.address}) \n${result.stdout}`
`Could not ping ${master.name_label}->${
address.pif.$host.name_label
} (${address.address}) \n${result.stdout}`
)
}
})
@@ -807,7 +809,7 @@ export const createSR = defer(async function(
})
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 6 }
log.debug('scanning new SR')
await xapi.callAsync('SR.scan', xosanSrRef)
await xapi.call('SR.scan', xosanSrRef)
await this.rebindLicense({
licenseId: license.id,
oldBoundObjectId: tmpBoundObjectId,
@@ -882,13 +884,13 @@ async function createVDIOnLVMWithoutSizeLimit(xapi, lvmSr, diskSize) {
if (result.exit !== 0) {
throw Error('Could not create volume ->' + result.stdout)
}
await xapi.callAsync('SR.scan', xapi.getObject(lvmSr).$ref)
await xapi.call('SR.scan', xapi.getObject(lvmSr).$ref)
const vdi = find(xapi.getObject(lvmSr).$VDIs, vdi => vdi.uuid === uuid)
if (vdi != null) {
await Promise.all([
vdi.set_name_description('Created by XO'),
vdi.set_name_label('xosan_data'),
])
await xapi.setSrProperties(vdi.$ref, {
nameLabel: 'xosan_data',
nameDescription: 'Created by XO',
})
return vdi
}
}
@@ -987,7 +989,7 @@ async function replaceBrickOnSameVM(
await xapi.disconnectVbd(previousVBD)
await xapi.deleteVdi(previousVBD.VDI)
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 4 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1048,7 +1050,9 @@ export async function replaceBrick({
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 1 }
await glusterCmd(
glusterEndpoint,
`volume replace-brick xosan ${previousBrick} ${addressAndHost.brickName} commit force`
`volume replace-brick xosan ${previousBrick} ${
addressAndHost.brickName
} commit force`
)
await glusterCmd(glusterEndpoint, 'peer detach ' + previousIp)
data.nodes.splice(nodeIndex, 1, {
@@ -1064,7 +1068,7 @@ export async function replaceBrick({
await xapi.deleteVm(previousVMEntry.vm, true)
}
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 3 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1111,7 +1115,7 @@ async function _prepareGlusterVm(
const firstVif = newVM.$VIFs[0]
if (xosanNetwork.$id !== firstVif.$network.$id) {
try {
await xapi.callAsync('VIF.move', firstVif.$ref, xosanNetwork.$ref)
await xapi.call('VIF.move', firstVif.$ref, xosanNetwork.$ref)
} catch (error) {
if (error.code === 'MESSAGE_METHOD_UNKNOWN') {
// VIF.move has been introduced in xenserver 7.0
@@ -1120,13 +1124,15 @@ async function _prepareGlusterVm(
}
}
}
await newVM.add_tags('XOSAN')
await xapi.addTag(newVM.$id, 'XOSAN')
await xapi.editVm(newVM, {
name_label: `XOSAN - ${lvmSr.name_label} - ${host.name_label} ${labelSuffix}`,
name_label: `XOSAN - ${lvmSr.name_label} - ${
host.name_label
} ${labelSuffix}`,
name_description: 'Xosan VM storage',
memory: memorySize,
})
await newVM.set_xenstore_data(xenstoreData)
await xapi.call('VM.set_xenstore_data', newVM.$ref, xenstoreData)
const rootDisk = newVM.$VBDs
.map(vbd => vbd && vbd.$VDI)
.find(vdi => vdi && vdi.name_label === 'xosan_root')
@@ -1324,7 +1330,7 @@ export const addBricks = defer(async function(
data.nodes = data.nodes.concat(newNodes)
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 2 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1376,7 +1382,7 @@ export const removeBricks = defer(async function($defer, { xosansr, bricks }) {
)
remove(data.nodes, node => ips.includes(node.vm.ip))
await xapi.xo.setData(xosansr.id, 'xosan_config', data)
await xapi.callAsync('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
} finally {
delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
@@ -1536,10 +1542,9 @@ export async function downloadAndInstallXosanPack({ id, version, pool }) {
const res = await this.requestResource('xosan', id, version)
await xapi.installSupplementalPackOnAllHosts(res)
await xapi.pool.update_other_config(
'xosan_pack_installation_time',
String(Math.floor(Date.now() / 1e3))
)
await xapi._updateObjectMapProperty(xapi.pool, 'other_config', {
xosan_pack_installation_time: String(Math.floor(Date.now() / 1e3)),
})
}
downloadAndInstallXosanPack.description = 'Register a resource via cloud plugin'

View File

@@ -13,6 +13,7 @@ import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'
@@ -92,7 +93,7 @@ async function loadConfiguration() {
function createExpressApp(config) {
const app = createExpress()
app.use(helmet(config.http.helmet))
app.use(helmet())
app.use(compression())
@@ -331,7 +332,7 @@ async function registerPluginsInPath(path) {
await Promise.all(
mapToArray(files, name => {
if (name.startsWith(PLUGIN_PREFIX)) {
if (startsWith(name, PLUGIN_PREFIX)) {
return registerPluginWrapper.call(
this,
`${path}/${name}`,
@@ -416,7 +417,6 @@ const setUpProxies = (express, opts, xo) => {
}
const proxy = createProxyServer({
changeOrigin: true,
ignorePath: true,
}).on('error', error => console.error(error))
@@ -427,7 +427,7 @@ const setUpProxies = (express, opts, xo) => {
const { url } = req
for (const prefix in opts) {
if (url.startsWith(prefix)) {
if (startsWith(url, prefix)) {
const target = opts[prefix]
proxy.web(req, res, {
@@ -451,7 +451,7 @@ const setUpProxies = (express, opts, xo) => {
const { url } = req
for (const prefix in opts) {
if (url.startsWith(prefix)) {
if (startsWith(url, prefix)) {
const target = opts[prefix]
proxy.ws(req, socket, head, {

View File

@@ -2,8 +2,6 @@ import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
import { parseProp } from './utils'
// ===================================================================
export default class Remote extends Model {}
@@ -16,21 +14,12 @@ export class Remotes extends Collection {
async get(properties) {
const remotes = await super.get(properties)
forEach(remotes, remote => {
remote.benchmarks = parseProp('remote', remote, 'benchmarks')
remote.benchmarks =
remote.benchmarks !== undefined
? JSON.parse(remote.benchmarks)
: undefined
remote.enabled = remote.enabled === 'true'
})
return remotes
}
_update(remotes) {
return super._update(
remotes.map(remote => {
const { benchmarks } = remote
if (benchmarks !== undefined) {
remote.benchmarks = JSON.stringify(benchmarks)
}
return remote
})
)
}
}
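
Redis stores every field as a string, so the newer version of this collection serializes 'benchmarks' in _update() and parses it back in get() through parseProp. That helper is not shown in this compare; judging only from how it is called here, a minimal equivalent might look like this (the logging is illustrative):

// Minimal stand-in for parseProp(type, model, key): parse a JSON-encoded
// field, tolerating absent or unparsable values instead of throwing.
function parseProp(type, model, key) {
  const value = model[key]
  if (value === undefined) {
    return undefined
  }
  try {
    return JSON.parse(value)
  } catch (error) {
    console.warn('cannot parse %s of %s %s:', key, type, model.id, error)
    return undefined
  }
}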

Some files were not shown because too many files have changed in this diff.