Compare commits

..

1 Commits

Author SHA1 Message Date
Julien Fontanet
3facbcda99 feat(xen-api/_watchEvents): detect and fix desynchronizations 2019-04-08 15:46:26 +02:00
171 changed files with 3772 additions and 7259 deletions

View File

@@ -1,7 +1,5 @@
module.exports = {
extends: [
'plugin:eslint-comments/recommended',
'standard',
'standard-jsx',
'prettier',
@@ -21,7 +19,7 @@ module.exports = {
overrides: [
{
files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
files: ['packages/*cli*/**/*.js', '*-cli.js'],
rules: {
'no-console': 'off',
},
@@ -35,9 +33,6 @@ module.exports = {
},
},
rules: {
// disabled because XAPI objects are using camel case
camelcase: ['off'],
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',

View File

@@ -46,7 +46,6 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -16,9 +16,6 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.26.0"
},
"scripts": {
"postversion": "npm publish"
"xen-api": "^0.25.1"
}
}

View File

@@ -55,7 +55,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -42,7 +42,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/fs",
"version": "0.9.0",
"version": "0.8.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -21,18 +21,18 @@
"node": ">=6"
},
"dependencies": {
"@marsaud/smb2": "^0.14.0",
"@marsaud/smb2": "^0.13.0",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^1.0.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.12.1",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.5.0"
},
"devDependencies": {
@@ -45,7 +45,7 @@
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"dotenv": "^8.0.0",
"dotenv": "^7.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
@@ -55,7 +55,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -400,10 +400,6 @@ export default class RemoteHandlerAbstract {
}
}
async truncate(file: string, len: number): Promise<void> {
await this._truncate(file, len)
}
async unlink(file: string, { checksum = true }: Object = {}): Promise<void> {
file = normalizePath(file)
@@ -414,18 +410,6 @@ export default class RemoteHandlerAbstract {
await this._unlink(file).catch(ignoreEnoent)
}
async write(
file: File,
buffer: Buffer,
position: number
): Promise<{| bytesWritten: number, buffer: Buffer |}> {
await this._write(
typeof file === 'string' ? normalizePath(file) : file,
buffer,
position
)
}
async writeFile(
file: string,
data: Data,
@@ -562,28 +546,6 @@ export default class RemoteHandlerAbstract {
throw new Error('Not implemented')
}
async _write(file: File, buffer: Buffer, position: number): Promise<void> {
const isPath = typeof file === 'string'
if (isPath) {
file = await this.openFile(file, 'r+')
}
try {
return await this._writeFd(file, buffer, position)
} finally {
if (isPath) {
await this.closeFile(file)
}
}
}
async _writeFd(
fd: FileDescriptor,
buffer: Buffer,
position: number
): Promise<void> {
throw new Error('Not implemented')
}
async _writeFile(
file: string,
data: Data,

View File

@@ -3,9 +3,9 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { random } from 'lodash'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -310,70 +310,5 @@ handlers.forEach(url => {
await handler.unlink('file')
})
})
describe('#write()', () => {
beforeEach(() => handler.outputFile('file', TEST_DATA))
const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)
forOwn(
{
'dont increase file size': (() => {
const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)
const expected = Buffer.from(TEST_DATA)
PATCH_DATA.copy(expected, offset)
return { offset, expected }
})(),
'increase file size': (() => {
const offset = random(
TEST_DATA_LEN - PATCH_DATA_LEN + 1,
TEST_DATA_LEN
)
const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
TEST_DATA.copy(expected)
PATCH_DATA.copy(expected, offset)
return { offset, expected }
})(),
},
({ offset, expected }, title) => {
describe(title, () => {
testWithFileDescriptor('file', 'r+', async ({ file }) => {
await handler.write(file, PATCH_DATA, offset)
await expect(await handler.readFile('file')).toEqual(expected)
})
})
}
)
})
describe('#truncate()', () => {
forOwn(
{
'shrinks file': (() => {
const length = random(0, TEST_DATA_LEN)
const expected = TEST_DATA.slice(0, length)
return { length, expected }
})(),
'grows file': (() => {
const length = random(TEST_DATA_LEN, TEST_DATA_LEN * 2)
const expected = Buffer.alloc(length)
TEST_DATA.copy(expected)
return { length, expected }
})(),
},
({ length, expected }, title) => {
it(title, async () => {
await handler.outputFile('file', TEST_DATA)
await handler.truncate('file', length)
await expect(await handler.readFile('file')).toEqual(expected)
})
}
)
})
})
})

View File

@@ -106,18 +106,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
await fs.access(path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return fs.truncate(this._getFilePath(file), len)
}
async _unlink(file) {
return fs.unlink(this._getFilePath(file))
}
_writeFd(file, buffer, position) {
return fs.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
}

View File

@@ -155,20 +155,10 @@ export default class SmbHandler extends RemoteHandlerAbstract {
return this.list('.')
}
_truncate(file, len) {
return this._client
.truncate(this._getFilePath(file), len)
.catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client
.writeFile(this._getFilePath(file), data, options)

View File

@@ -48,7 +48,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -45,7 +45,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -1,99 +1,5 @@
# ChangeLog
## **next**
### Enhancements
- [VM/Advanced] Ability to use UEFI instead of BIOS [#4264](https://github.com/vatesfr/xen-orchestra/issues/4264) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4268))
### Bug fixes
- [XOA] Don't require editing the _email_ field in case of re-registration (PR [#4259](https://github.com/vatesfr/xen-orchestra/pull/4259))
### Released packages
- xen-api v0.25.2
- xo-server v5.43.0
- xo-web v5.43.0
## **5.35.0** (2019-05-29)
![Channel: latest](https://badgen.net/badge/channel/latest/yellow)
### Enhancements
- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
- [Host] Display hyperthreading status on advanced tab [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4263](https://github.com/vatesfr/xen-orchestra/pull/4263))
### Bug fixes
- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
- Prevent non-admin users to access admin pages with URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
- [Remotes] Fix disconnected remotes which may appear to work
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
### Released packages
- xo-server-backup-reports v0.16.1
- @xen-orchestra/fs v0.9.0
- vhd-lib v0.7.0
- xo-server v5.42.1
- xo-web v5.42.1
## **5.34.0** (2019-04-30)
![Channel: stable](https://badgen.net/badge/channel/stable/green)
### Highlights
- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))
### Enhancements
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))
### Bug fixes
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
### Released packages
- xo-server-backup-reports v0.16.0
- complex-matcher v0.6.0
- xo-vmdk-to-vhd v0.1.7
- vhd-lib v0.6.1
- xo-server v5.40.0
- xo-web v5.40.1
## **5.33.1** (2019-04-04)
### Bug fix

View File

@@ -2,31 +2,17 @@
### Enhancements
- [Backup-ng/restore] Display size for full VM backup [#4009](https://github.com/vatesfr/xen-orchestra/issues/4009) (PR [#4245](https://github.com/vatesfr/xen-orchestra/pull/4245))
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
- [auth-saml] Improve compatibility with Microsoft Azure Active Directory (PR [#4294](https://github.com/vatesfr/xen-orchestra/pull/4294))
- [Host] Display warning when "Citrix Hypervisor" license has restrictions [#4251](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4279))
- [VM/Backup] Create backup bulk action [#2573](https://github.com/vatesfr/xen-orchestra/issues/2573) (PR [#4257](https://github.com/vatesfr/xen-orchestra/pull/4257))
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
- [Host] Display warning when host's time differs too much from XOA's time [#4113](https://github.com/vatesfr/xen-orchestra/issues/4113) (PR [#4173](https://github.com/vatesfr/xen-orchestra/pull/4173))
- [Host/storages, SR/hosts] Display PBD details [#4264](https://github.com/vatesfr/xen-orchestra/issues/4161) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4284))
- [VM/network] Display and set bandwidth rate-limit of a VIF [#4215](https://github.com/vatesfr/xen-orchestra/issues/4215) (PR [#4293](https://github.com/vatesfr/xen-orchestra/pull/4293))
- [SDN Controller] New plugin which enables creating pool-wide private networks [xcp-ng/xcp#175](https://github.com/xcp-ng/xcp/issues/175) (PR [#4269](https://github.com/vatesfr/xen-orchestra/pull/4269))
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
### Bug fixes
- [Metadata backup] Missing XAPIs should trigger a failure job [#4281](https://github.com/vatesfr/xen-orchestra/issues/4281) (PR [#4283](https://github.com/vatesfr/xen-orchestra/pull/4283))
- [Host/advanced] Fix host CPU hyperthreading detection [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4285](https://github.com/vatesfr/xen-orchestra/pull/4285))
- [iSCSI] Fix fibre channel paths display [#4291](https://github.com/vatesfr/xen-orchestra/issues/4291) (PR [#4303](https://github.com/vatesfr/xen-orchestra/pull/4303))
- [New VM] Fix tooltips not displayed on disabled elements in some browsers (e.g. Google Chrome) [#4304](https://github.com/vatesfr/xen-orchestra/issues/4304) (PR [#4309](https://github.com/vatesfr/xen-orchestra/pull/4309))
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
### Released packages
- xo-server-auth-ldap v0.6.5
- xen-api v0.26.0
- xo-server-sdn-controller v0.1
- xo-server-auth-saml v0.6.0
- xo-server-backup-reports v0.16.2
- xo-server v5.44.0
- xo-web v5.44.0
- xo-vmdk-to-vhd v0.1.7
- vhd-lib v0.6.1
- xo-server v5.39.0
- xo-web v5.39.0

View File

@@ -14,5 +14,5 @@
1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer (and only one)
1. when you want a review, add a reviewer
1. if necessary, update your PR, and re-add a reviewer

View File

@@ -1,4 +1,4 @@
# Xen Orchestra [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
# Xen Orchestra [![Chat with us](https://storage.crisp.im/plugins/images/936925df-f37b-4ba8-bab0-70cd2edcb0be/badge.svg)](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
![](http://i.imgur.com/tRffA5y.png)

View File

@@ -1,13 +1,13 @@
# Installation
SSH to your XenServer/XCP-ng host and execute the following:
SSH to your XenServer and execute the following:
```
bash -c "$(curl -s http://xoa.io/deploy)"
```
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.
This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself, it's 100% safe.
## [More on XOA](xoa.md)

View File

@@ -1,33 +1,24 @@
# Support
Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).
You can access our pro support if you subscribe to any of these plans:
XOA is available in multiple plans:
* Free
* Starter
* Enterprise
* Premium
Higher tier support plans include faster ticket response times (and cover more features). Paid support plans and response times are based on the plan you have, plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).
## XOA Free support
With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you, however, this support is limited and is not guaranteed in regards to response times or resolutions offered.
The better the plan, the faster and higher-priority the support will be.
## Community support
If you are using Xen Orchestra via the source and not XOA, you can ask questions and try to receive help in a number of different ways:
If you are using Xen Orchestra via the sources, you can ask questions and try to receive help in two different ways:
* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
* In our [forum](https://xen-orchestra.com/forum/)
* In our IRC - `#xen-orchestra` on `Freenode`
We encourage you to give back to the community by assisting other users via these two avenues as well.
However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
Lastly while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe for one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).
> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.
If you are using Xen Orchestra in production, please subscribe to a plan.
## Open a ticket
If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)

View File

@@ -10,16 +10,15 @@
"eslint-config-prettier": "^4.1.0",
"eslint-config-standard": "12.0.0",
"eslint-config-standard-jsx": "^6.0.2",
"eslint-plugin-eslint-comments": "^3.1.1",
"eslint-plugin-import": "^2.8.0",
"eslint-plugin-node": "^9.0.1",
"eslint-plugin-node": "^8.0.0",
"eslint-plugin-promise": "^4.0.0",
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^4.0.0",
"exec-promise": "^0.7.0",
"flow-bin": "^0.100.0",
"flow-bin": "^0.95.1",
"globby": "^9.0.0",
"husky": "^2.2.0",
"husky": "^1.2.1",
"jest": "^24.1.0",
"lodash": "^4.17.4",
"prettier": "^1.10.2",

View File

@@ -1,6 +1,6 @@
{
"name": "complex-matcher",
"version": "0.6.0",
"version": "0.5.0",
"license": "ISC",
"description": "",
"keywords": [],
@@ -44,7 +44,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -599,13 +599,6 @@ export const parse = parser.parse.bind(parser)
// -------------------------------------------------------------------
const _extractStringFromRegexp = child => {
const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
return unescapedRegexp
}
}
const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof Or) {
const strings = []
@@ -613,12 +606,6 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
strings.push(child.value)
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
strings.push(unescapedRegexp)
}
}
})
return strings
}
@@ -626,12 +613,6 @@ const _getPropertyClauseStrings = ({ child }) => {
if (child instanceof StringNode) {
return [child.value]
}
if (child instanceof RegExpNode) {
const unescapedRegexp = _extractStringFromRegexp(child)
if (unescapedRegexp !== undefined) {
return [unescapedRegexp]
}
}
return []
}

View File

@@ -12,13 +12,10 @@ import {
} from './'
it('getPropertyClausesStrings', () => {
const tmp = getPropertyClausesStrings(
parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
)
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
expect(tmp).toEqual({
bar: ['baz'],
baz: ['foo', 'bar', 'boo', 'far'],
foo: ['bar'],
baz: ['foo', 'bar'],
})
})

View File

@@ -43,7 +43,6 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -27,12 +27,12 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.9.0",
"@xen-orchestra/fs": "^0.8.0",
"cli-progress": "^2.0.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.7.0"
"vhd-lib": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -44,14 +44,13 @@
"index-modules": "^0.3.0",
"promise-toolbox": "^0.12.1",
"rimraf": "^2.6.1",
"tmp": "^0.1.0"
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "vhd-lib",
"version": "0.7.0",
"version": "0.6.0",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -22,9 +22,9 @@
},
"dependencies": {
"async-iterator-to-stream": "^1.0.2",
"core-js": "^3.0.0",
"core-js": "3.0.0",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.12.1",
"struct-fu": "^1.2.0",
@@ -35,16 +35,16 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.9.0",
"@xen-orchestra/fs": "^0.8.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^1.0.0",
"fs-promise": "^2.0.0",
"get-stream": "^5.1.0",
"get-stream": "^4.0.0",
"index-modules": "^0.3.0",
"readable-stream": "^3.0.6",
"rimraf": "^2.6.2",
"tmp": "^0.1.0"
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -52,7 +52,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -1,7 +1,9 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import constantStream from './_constant-stream'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
@@ -230,11 +232,24 @@ export default class Vhd {
// Write functions.
// =================================================================
// Write a buffer at a given position in a vhd file.
// Write a buffer/stream at a given position in a vhd file.
async _write(data, offset) {
assert(Buffer.isBuffer(data))
debug(`_write offset=${offset} size=${data.length}`)
return this._handler.write(this._path, data, offset)
debug(
`_write offset=${offset} size=${
Buffer.isBuffer(data) ? data.length : '???'
}`
)
// TODO: could probably be merged in remote handlers.
const stream = await this._handler.createOutputStream(this._path, {
flags: 'r+',
start: offset,
})
return Buffer.isBuffer(data)
? new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
: fromEvent(data.pipe(stream), 'finish')
}
async _freeFirstBlockSpace(spaceNeededBytes) {
@@ -291,7 +306,7 @@ export default class Vhd {
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
await this._write(
Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
header.tableOffset + prevBat.length
)
await this.writeHeader()
@@ -316,7 +331,10 @@ export default class Vhd {
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
this._write(
constantStream([0], this.fullBlockSize),
sectorsToBytes(blockAddr)
),
this._setBatEntry(blockId, blockAddr),
])

View File

@@ -41,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.26.0"
"xen-api": "^0.25.1"
},
"devDependencies": {
"@babel/cli": "^7.1.5",
@@ -56,7 +56,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -82,7 +82,7 @@ console.log(xapi.pool.$master.$resident_VMs[0].name_label)
A CLI is provided to help exploration and discovery of the XAPI.
```
> xen-api xen1.company.net root
> xen-api https://xen1.company.net root
Password: ******
root@xen1.company.net> xapi.status
'connected'
@@ -92,14 +92,6 @@ root@xen1.company.net> xapi.pool.$master.name_label
'xen1'
```
You can optionally prefix the address by a protocol: `https://` (default) or `http://`.
In case of error due to invalid or self-signed certificates you can use the `--allow-unauthorized` flag (or `--au`):
```
> xen-api --au xen1.company.net root
```
To ease searches, `find()` and `findAll()` functions are available:
```

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.26.0",
"version": "0.25.1",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -33,7 +33,6 @@
"node": ">=6"
},
"dependencies": {
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"debug": "^4.0.1",
"event-to-promise": "^0.8.0",
@@ -69,7 +68,6 @@
"plot": "gnuplot -p memory-test.gnu",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -0,0 +1,8 @@
const handler = {
get(target, property) {
const value = target[property]
return value !== undefined ? value : 0
},
}
export const create = () => new Proxy({ __proto__: null }, handler)

View File

@@ -9,7 +9,6 @@ import minimist from 'minimist'
import pw from 'pw'
import { asCallback, fromCallback } from 'promise-toolbox'
import { filter, find, isArray } from 'lodash'
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
import { start as createRepl } from 'repl'
import { createClient } from './'
@@ -26,20 +25,6 @@ function askPassword(prompt = 'Password: ') {
})
}
const { getPrototypeOf, ownKeys } = Reflect

// Walk the whole prototype chain of `object` and collect, for every own or
// inherited key, a property descriptor bound to `object` itself. The
// definition closest to `object` wins over shadowed ones higher in the chain.
function getAllBoundDescriptors(object) {
  const descriptors = { __proto__: null }
  let proto = object
  do {
    for (const key of ownKeys(proto)) {
      // keep the descriptor found lowest in the prototype chain
      if (!(key in descriptors)) {
        descriptors[key] = getBoundPropertyDescriptor(proto, key, object)
      }
    }
    proto = getPrototypeOf(proto)
  } while (proto !== null)
  return descriptors
}
// ===================================================================
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
@@ -93,17 +78,11 @@ const main = async args => {
const repl = createRepl({
prompt: `${xapi._humanId}> `,
})
repl.context.xapi = xapi
{
const ctx = repl.context
ctx.xapi = xapi
ctx.diff = (a, b) => console.log('%s', diff(a, b))
ctx.find = predicate => find(xapi.objects.all, predicate)
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
}
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
repl.context.find = predicate => find(xapi.objects.all, predicate)
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
// Make the REPL waits for promise completion.
repl.eval = (evaluate => (cmd, context, filename, cb) => {

View File

@@ -15,6 +15,7 @@ import {
pTimeout,
} from 'promise-toolbox'
import * as MultiCounter from './_MultiCounter'
import autoTransport from './transports/auto'
import coalesceCalls from './_coalesceCalls'
import debug from './_debug'
@@ -34,7 +35,7 @@ const EVENT_TIMEOUT = 60
// ===================================================================
const { defineProperties, defineProperty, freeze, keys: getKeys } = Object
const { defineProperties, freeze, keys: getKeys } = Object
// -------------------------------------------------------------------
@@ -99,6 +100,7 @@ export class Xapi extends EventEmitter {
this._sessionId = undefined
this._status = DISCONNECTED
this._counter = MultiCounter.create()
this._debounce = opts.debounce ?? 200
this._objects = new Collection()
this._objectsByRef = { __proto__: null }
@@ -168,6 +170,22 @@ export class Xapi extends EventEmitter {
try {
await this._sessionOpen()
// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})
this._pool = (await this.getAllRecords('pool'))[0]
debug('%s: connected', this._humanId)
this._status = CONNECTED
this._resolveConnected()
@@ -723,28 +741,6 @@ export class Xapi extends EventEmitter {
},
}
)
const oldPoolRef = this._pool?.$ref
this._pool = (await this.getAllRecords('pool'))[0]
    // If the pool ref has changed, the XAPI has been restarted or this is a
    // different XAPI; in that case, refetch the available types and reset
    // the event loop.
if (this._pool.$ref !== oldPoolRef) {
// Uses introspection to list available types.
const types = (this._types = (await this._interruptOnDisconnect(
this._call('system.listMethods')
))
.filter(isGetAllRecordsMethod)
.map(method => method.slice(0, method.indexOf('.'))))
this._lcToTypes = { __proto__: null }
types.forEach(type => {
const lcType = type.toLowerCase()
if (lcType !== type) {
this._lcToTypes[lcType] = type
}
})
}
}
_setUrl(url) {
@@ -779,6 +775,10 @@ export class Xapi extends EventEmitter {
this._objects.set(object.$id, object)
objectsByRef[ref] = object
if (prev === undefined) {
++this._counter[type]
}
if (type === 'pool') {
this._pool = object
@@ -791,10 +791,6 @@ export class Xapi extends EventEmitter {
}
})
} else if (type === 'task') {
if (prev === undefined) {
++this._nTasks
}
const taskWatchers = this._taskWatchers
const taskWatcher = taskWatchers[ref]
if (taskWatcher !== undefined) {
@@ -826,6 +822,7 @@ export class Xapi extends EventEmitter {
}
async _refreshCachedRecords(types) {
const counter = this._counter
const toRemoveByType = { __proto__: null }
types.forEach(type => {
toRemoveByType[type] = new Set()
@@ -857,8 +854,15 @@ export class Xapi extends EventEmitter {
this._removeRecordFromCache(type, ref)
})
if (type === 'task') {
this._nTasks = refs.length
const count = refs.length
if (counter[type] !== count) {
console.warn(
'_refreshCachedRecords(%s): xapi=%d != local=%d',
type,
count,
counter[type]
)
counter[type] = count
}
} catch (error) {
// there is nothing ideal to do here, do not interrupt event
@@ -879,9 +883,7 @@ export class Xapi extends EventEmitter {
this._objects.unset(object.$id)
delete byRefs[ref]
if (type === 'task') {
--this._nTasks
}
--this._counter[type]
}
const taskWatchers = this._taskWatchers
@@ -933,6 +935,16 @@ export class Xapi extends EventEmitter {
this._resolveObjectsFetched()
this._resolveObjectsFetched = undefined
const IGNORED_TYPES = {
__proto__: null,
message: true,
role: true,
session: true,
user: true,
VBD_metrics: true,
VIF_metrics: true,
}
// event loop
const debounce = this._debounce
while (true) {
@@ -942,12 +954,9 @@ export class Xapi extends EventEmitter {
let result
try {
// don't use _sessionCall because a session failure should break the
// loop and trigger a complete refetch
result = await this._call(
result = await this._sessionCall(
'event.from',
[
this._sessionId,
types,
fromToken,
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
@@ -955,8 +964,7 @@ export class Xapi extends EventEmitter {
EVENT_TIMEOUT * 1e3 * 1.1
)
} catch (error) {
const code = error?.code
if (code === 'EVENTS_LOST' || code === 'SESSION_INVALID') {
if (error?.code === 'EVENTS_LOST') {
// eslint-disable-next-line no-labels
continue mainLoop
}
@@ -969,10 +977,25 @@ export class Xapi extends EventEmitter {
fromToken = result.token
this._processEvents(result.events)
// detect and fix disappearing tasks (e.g. when toolstack restarts)
if (result.valid_ref_counts.task !== this._nTasks) {
await this._refreshCachedRecords(['task'])
}
// detect and fix desynchronized records
const localCounts = this._counter
const xapiCounts = result.valid_ref_counts
await this._refreshCachedRecords(
types.filter(type => {
if (type in IGNORED_TYPES) {
return false
}
// XAPI uses lowercased types in events, but this may change, so we
// handle both
let xapiCount = xapiCounts[type]
if (xapiCount === undefined) {
xapiCount = xapiCounts[type.toLowerCase()]
}
return localCounts[type] !== xapiCount
})
)
}
}
}
@@ -1033,23 +1056,17 @@ export class Xapi extends EventEmitter {
const getObjectByRef = ref => this._objectsByRef[ref]
Record = defineProperty(
function(ref, data) {
defineProperties(this, {
$id: { value: data.uuid ?? ref },
$ref: { value: ref },
$xapi: { value: xapi },
})
for (let i = 0; i < nFields; ++i) {
const field = fields[i]
this[field] = data[field]
}
},
'name',
{
value: type,
Record = function(ref, data) {
defineProperties(this, {
$id: { value: data.uuid ?? ref },
$ref: { value: ref },
$xapi: { value: xapi },
})
for (let i = 0; i < nFields; ++i) {
const field = fields[i]
this[field] = data[field]
}
)
}
const getters = { $pool: getPool }
const props = { $type: type }
@@ -1069,14 +1086,9 @@ export class Xapi extends EventEmitter {
}
}
props[`add_${field}`] = function(value) {
props[`add_to_${field}`] = function(...values) {
return xapi
.call(`${type}.add_${field}`, this.$ref, value)
.then(noop)
}
props[`remove_${field}`] = function(value) {
return xapi
.call(`${type}.remove_${field}`, this.$ref, value)
.call(`${type}.add_${field}`, this.$ref, values)
.then(noop)
}
} else if (value !== null && typeof value === 'object') {

View File

@@ -25,8 +25,5 @@
},
"dependencies": {
"xo-common": "^0.2.0"
},
"scripts": {
"postversion": "npm publish"
}
}

View File

@@ -64,7 +64,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -43,7 +43,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -45,7 +45,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -43,7 +43,7 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@types/node": "^12.0.2",
"@types/node": "^11.11.4",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^8.0.1",
@@ -55,7 +55,6 @@
"lint": "tslint 'src/*.ts'",
"posttest": "yarn run lint",
"prepublishOnly": "yarn run build",
"start": "node dist/index.js",
"postversion": "npm publish"
"start": "node dist/index.js"
}
}

View File

@@ -32,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"jsonrpc-websocket-client": "^0.5.0",
"jsonrpc-websocket-client": "^0.4.1",
"lodash": "^4.17.2",
"make-error": "^1.0.4"
},
@@ -49,7 +49,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
"prepublishOnly": "yarn run build"
}
}

View File

@@ -41,7 +41,6 @@
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepare": "yarn run build",
"postversion": "npm publish"
"prepare": "yarn run build"
}
}

View File

@@ -41,6 +41,5 @@
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -49,6 +49,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-ldap",
"version": "0.6.5",
"version": "0.6.4",
"license": "AGPL-3.0",
"description": "LDAP authentication plugin for XO-Server",
"keywords": [
@@ -55,6 +55,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -234,7 +234,6 @@ class AuthLdap {
entry.objectName
} => ${username} authenticated`
)
logger(JSON.stringify(entry, null, 2))
return { username }
} catch (error) {
logger(`failed to bind as ${entry.objectName}: ${error.message}`)

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.6.0",
"version": "0.5.3",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [
@@ -33,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-saml": "^1.1.0"
"passport-saml": "^1.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -24,10 +24,7 @@ export const configurationSchema = {
},
usernameField: {
title: 'Username field',
description: `Field to use as the XO username
You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress\` if you are using Microsoft Azure Active Directory.
`,
description: 'Field to use as the XO username',
type: 'string',
},
},

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.16.2",
"version": "0.15.0",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -36,7 +36,6 @@
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -44,8 +43,6 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
@@ -58,6 +55,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -1,11 +1,8 @@
import createLogger from '@xen-orchestra/log'
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, groupBy, startCase } from 'lodash'
import { forEach, get, startCase } from 'lodash'
import pkg from '../package'
const logger = createLogger('xo:xo-server-backup-reports')
export const configurationSchema = {
type: 'object',
@@ -49,9 +46,6 @@ export const testSchema = {
// ===================================================================
const INDENT = ' '
const UNKNOWN_ITEM = 'Unknown'
const ICON_FAILURE = '🚨'
const ICON_INTERRUPTED = '⚠️'
const ICON_SKIPPED = '⏩'
@@ -66,7 +60,7 @@ const STATUS_ICON = {
}
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormatter = timezone =>
const createDateFormater = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
@@ -92,6 +86,10 @@ const formatSpeed = (bytes, milliseconds) =>
})
: 'N/A'
const logError = e => {
console.error('backup report error:', e)
}
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
@@ -102,116 +100,40 @@ const isSkippedError = error =>
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
error.message === NO_SUCH_OBJECT_ERROR
// ===================================================================
const INDENT = ' '
const createGetTemporalDataMarkdown = formatDate => (
start,
end,
nbIndent = 0
) => {
const indent = INDENT.repeat(nbIndent)
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
const TITLE_BY_STATUS = {
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
interrupted: n => `## ${n} Interrupted`,
skipped: n => `## ${n} Skipped`,
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
}
const getTemporalDataMarkdown = (end, start, formatDate) => {
const markdown = [`- **Start time**: ${formatDate(start)}`]
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
if (end !== undefined) {
markdown.push(`- **End time**: ${formatDate(end)}`)
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
const duration = end - start
if (duration >= 1) {
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
}
}
return markdown
}
// Render each warning as a bold markdown bullet; `warnings` may be omitted,
// in which case an empty list is returned.
const getWarningsMarkdown = (warnings = []) =>
  warnings.map(w => `- **${ICON_WARNING} ${w.message}**`)
const getErrorMarkdown = task => {
let message
if (
task.status === 'success' ||
(message = task.result?.message ?? task.result?.code) === undefined
) {
const addWarnings = (text, warnings, nbIndent = 0) => {
if (warnings === undefined) {
return
}
const label = task.status === 'skipped' ? 'Reason' : 'Error'
return `- **${label}**: ${message}`
const indent = INDENT.repeat(nbIndent)
warnings.forEach(({ message }) => {
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
})
}
const MARKDOWN_BY_TYPE = {
pool(task, { formatDate }) {
const { id, pool = {}, poolMaster = {} } = task.data
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
return {
body: [
pool.uuid !== undefined
? `- **UUID**: ${pool.uuid}`
: `- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[pool] ${name}`,
}
},
xo(task, { formatDate, jobName }) {
return {
body: [
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[XO] ${jobName}`,
}
},
async remote(task, { formatDate, xo }) {
const id = task.data.id
const name = await xo.getRemote(id).then(
({ name }) => name,
error => {
logger.warn(error)
return UNKNOWN_ITEM
}
)
return {
body: [
`- **ID**: ${id}`,
...getTemporalDataMarkdown(task.end, task.start, formatDate),
getErrorMarkdown(task),
],
title: `[remote] ${name}`,
}
},
}
const getMarkdown = (task, props) =>
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
const toMarkdown = parts => {
const lines = []
let indentLevel = 0
const helper = part => {
if (typeof part === 'string') {
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
} else if (Array.isArray(part)) {
++indentLevel
part.forEach(helper)
--indentLevel
}
}
helper(parts)
return lines.join('\n')
}
// ===================================================================
class BackupReportsXoPlugin {
constructor(xo) {
this._xo = xo
this._report = this._report.bind(this)
this._report = this._wrapper.bind(this)
}
configure({ toMails, toXmpp }) {
@@ -224,174 +146,72 @@ class BackupReportsXoPlugin {
}
test({ runId }) {
return this._report(runId, undefined, true)
return this._backupNgListener(undefined, undefined, undefined, runId)
}
unload() {
this._xo.removeListener('job:terminated', this._report)
}
async _report(runJobId, { type, status } = {}, force) {
const xo = this._xo
try {
if (type === 'call') {
return this._legacyVmHandler(status)
}
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
}
const reportWhen = log.data.reportWhen
if (
!force &&
(reportWhen === 'never' ||
// Handle improper value introduced by:
// https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
reportWhen === 'Never' ||
(reportWhen === 'failure' && log.status === 'success'))
) {
return
}
const [job, schedule] = await Promise.all([
await xo.getJob(log.jobId),
await xo.getSchedule(log.scheduleId).catch(error => {
logger.warn(error)
}),
])
if (job.type === 'backup') {
return this._ngVmHandler(log, job, schedule, force)
} else if (job.type === 'metadataBackup') {
return this._metadataHandler(log, job, schedule, force)
}
throw new Error(`Unknown backup job type: ${job.type}`)
} catch (error) {
logger.warn(error)
_wrapper(status, job, schedule, runJobId) {
if (job.type === 'metadataBackup') {
return
}
return new Promise(resolve =>
resolve(
job.type === 'backup'
? this._backupNgListener(status, job, schedule, runJobId)
: this._listener(status, job, schedule, runJobId)
)
).catch(logError)
}
async _metadataHandler(log, { name: jobName }, schedule, force) {
async _backupNgListener(_1, _2, schedule, runJobId) {
const xo = this._xo
const formatDate = createDateFormatter(schedule?.timezone)
const tasksByStatus = groupBy(log.tasks, 'status')
const n = log.tasks?.length ?? 0
const nSuccesses = tasksByStatus.success?.length ?? 0
if (!force && log.data.reportWhen === 'failure') {
delete tasksByStatus.success
const log = await xo.getBackupNgLogs(runJobId)
if (log === undefined) {
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
}
// header
const markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Job name**: ${jobName}`,
`- **Run ID**: ${log.id}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
...getWarningsMarkdown(log.warnings),
getErrorMarkdown(log),
]
const nagiosText = []
// body
for (const status of STATUS) {
const tasks = tasksByStatus[status]
if (tasks === undefined) {
continue
}
// tasks header
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
// tasks body
for (const task of tasks) {
const taskMarkdown = await getMarkdown(task, {
formatDate,
jobName: log.jobName,
})
if (taskMarkdown === undefined) {
continue
}
const { title, body } = taskMarkdown
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
if (task.status !== 'success') {
nagiosText.push(`[${task.status}] ${title}`)
}
for (const subTask of task.tasks ?? []) {
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
if (taskMarkdown === undefined) {
continue
}
const icon = STATUS_ICON[subTask.status]
const { title, body } = taskMarkdown
subMarkdown.push([
`- **${title}** ${icon}`,
[...body, ...getWarningsMarkdown(subTask.warnings)],
])
}
markdown.push('', '', `### ${title}`, ...subMarkdown)
}
}
// footer
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
return this._sendReport({
subject: `[Xen Orchestra] ${log.status} Metadata backup report for ${
log.jobName
} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Metadata backup report for ${
log.jobName
}`
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
log.jobName
} - ${nagiosText.join(' ')}`,
})
}
async _ngVmHandler(log, { name: jobName }, schedule, force) {
const xo = this._xo
const { reportWhen, mode } = log.data || {}
if (
reportWhen === 'never' ||
(log.status === 'success' && reportWhen === 'failure')
) {
return
}
const formatDate = createDateFormatter(schedule?.timezone)
if (schedule === undefined) {
schedule = await xo.getSchedule(log.scheduleId)
}
if (log.tasks === undefined) {
const markdown = [
const jobName = (await xo.getJob(log.jobId, 'backup')).name
const formatDate = createDateFormater(schedule.timezone)
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
if (
(log.status === 'failure' || log.status === 'skipped') &&
log.result !== undefined
) {
let markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${log.id}`,
`- **Run ID**: ${runJobId}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
getErrorMarkdown(log),
...getWarningsMarkdown(log.warnings),
'---',
'',
`*${pkg.name} v${pkg.version}*`,
...getTemporalDataMarkdown(log.start, log.end),
`- **Error**: ${log.result.message}`,
]
addWarnings(markdown, log.warnings)
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown: toMarkdown(markdown),
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
@@ -411,7 +231,7 @@ class BackupReportsXoPlugin {
let nSkipped = 0
let nInterrupted = 0
for (const taskLog of log.tasks) {
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
if (taskLog.status === 'success' && reportWhen === 'failure') {
continue
}
@@ -424,16 +244,16 @@ class BackupReportsXoPlugin {
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
...getWarningsMarkdown(taskLog.warnings),
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
]
addWarnings(text, taskLog.warnings)
const failedSubTasks = []
const snapshotText = []
const srsText = []
const remotesText = []
for (const subTaskLog of taskLog.tasks ?? []) {
for (const subTaskLog of taskLog.tasks || []) {
if (
subTaskLog.message !== 'export' &&
subTaskLog.message !== 'snapshot'
@@ -442,36 +262,29 @@ class BackupReportsXoPlugin {
}
const icon = STATUS_ICON[subTaskLog.status]
const type = subTaskLog.data?.type
const errorMarkdown = getErrorMarkdown(subTaskLog)
const errorMessage = ` - **Error**: ${get(
subTaskLog.result,
'message'
)}`
if (subTaskLog.message === 'snapshot') {
snapshotText.push(`- **Snapshot** ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
])
} else if (type === 'remote') {
snapshotText.push(
`- **Snapshot** ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
)
} else if (subTaskLog.data.type === 'remote') {
const id = subTaskLog.data.id
const remote = await xo.getRemote(id).catch(error => {
logger.warn(error)
})
const title = remote !== undefined ? remote.name : `Remote Not found`
remotesText.push(`- **${title}** (${id}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
const remote = await xo.getRemote(id).catch(() => {})
remotesText.push(
` - **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${id}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(remotesText, subTaskLog.warnings, 2)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : id)
remotesText.push('', errorMessage)
}
} else {
const id = subTaskLog.data.id
@@ -481,17 +294,14 @@ class BackupReportsXoPlugin {
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
...getTemporalDataMarkdown(
subTaskLog.end,
subTaskLog.start,
formatDate
),
...getWarningsMarkdown(subTaskLog.warnings),
errorMarkdown,
])
srsText.push(
` - **${srName}** (${srUuid}) ${icon}`,
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
)
addWarnings(srsText, subTaskLog.warnings, 2)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
srsText.push('', errorMessage)
}
}
@@ -503,48 +313,53 @@ class BackupReportsXoPlugin {
return
}
const size = operationLog.result?.size
if (size > 0) {
const operationInfoText = []
addWarnings(operationInfoText, operationLog.warnings, 3)
if (operationLog.status === 'success') {
const size = operationLog.result.size
if (operationLog.message === 'merge') {
globalMergeSize += size
} else {
globalTransferSize += size
}
}
operationInfoText.push(
` - **Size**: ${formatSize(size)}`,
` - **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`
)
} else if (get(operationLog.result, 'message') !== undefined) {
operationInfoText.push(
` - **Error**: ${get(operationLog.result, 'message')}`
)
}
const operationText = [
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
[
...getTemporalDataMarkdown(
operationLog.end,
operationLog.start,
formatDate
),
size > 0 && `- **Size**: ${formatSize(size)}`,
size > 0 &&
`- **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`,
...getWarningsMarkdown(operationLog.warnings),
getErrorMarkdown(operationLog),
],
]
if (type === 'remote') {
` - **${operationLog.message}** ${
STATUS_ICON[operationLog.status]
}`,
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
...operationInfoText,
].join('\n')
if (get(subTaskLog, 'data.type') === 'remote') {
remotesText.push(operationText)
} else if (type === 'SR') {
remotesText.join('\n')
}
if (get(subTaskLog, 'data.type') === 'SR') {
srsText.push(operationText)
srsText.join('\n')
}
})
}
const subText = [
...snapshotText,
srsText.length !== 0 && `- **SRs**`,
srsText,
remotesText.length !== 0 && `- **Remotes**`,
remotesText,
]
if (srsText.length !== 0) {
srsText.unshift(`- **SRs**`)
}
if (remotesText.length !== 0) {
remotesText.unshift(`- **Remotes**`)
}
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
if (taskLog.result !== undefined) {
if (taskLog.status === 'skipped') {
++nSkipped
@@ -554,7 +369,8 @@ class BackupReportsXoPlugin {
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: taskLog.result.message
}`
}`,
''
)
nagiosText.push(
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -563,7 +379,11 @@ class BackupReportsXoPlugin {
)
} else {
++nFailures
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
failedVmsText.push(
...text,
`- **Error**: ${taskLog.result.message}`,
''
)
nagiosText.push(
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
@@ -574,7 +394,7 @@ class BackupReportsXoPlugin {
} else {
if (taskLog.status === 'failure') {
++nFailures
failedVmsText.push(...text, ...subText)
failedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[${
vm !== undefined ? vm.name_label : 'undefined'
@@ -582,34 +402,37 @@ class BackupReportsXoPlugin {
)
} else if (taskLog.status === 'interrupted') {
++nInterrupted
interruptedVmsText.push(...text, ...subText)
interruptedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
)
} else {
successfulVmsText.push(...text, ...subText)
successfulVmsText.push(...text, '', '', ...subText, '')
}
}
}
const nVms = log.tasks.length
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
const markdown = [
let markdown = [
`## Global status: ${log.status}`,
'',
`- **Job ID**: ${log.jobId}`,
`- **Run ID**: ${log.id}`,
`- **Run ID**: ${runJobId}`,
`- **mode**: ${mode}`,
...getTemporalDataMarkdown(log.end, log.start, formatDate),
...getTemporalDataMarkdown(log.start, log.end),
`- **Successes**: ${nSuccesses} / ${nVms}`,
globalTransferSize !== 0 &&
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
globalMergeSize !== 0 &&
`- **Merge size**: ${formatSize(globalMergeSize)}`,
...getWarningsMarkdown(log.warnings),
'',
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
addWarnings(markdown, log.warnings)
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
@@ -634,7 +457,7 @@ class BackupReportsXoPlugin {
)
}
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
if (nSuccesses !== 0 && reportWhen !== 'failure') {
markdown.push(
'---',
'',
@@ -645,8 +468,9 @@ class BackupReportsXoPlugin {
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown: toMarkdown(markdown),
markdown,
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
@@ -686,9 +510,9 @@ class BackupReportsXoPlugin {
])
}
_legacyVmHandler(status) {
_listener(status) {
const { calls, timezone, error } = status
const formatDate = createDateFormatter(timezone)
const formatDate = createDateFormater(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =

View File

@@ -33,7 +33,7 @@
},
"dependencies": {
"http-request-plus": "^0.8.0",
"jsonrpc-websocket-client": "^0.5.0"
"jsonrpc-websocket-client": "^0.4.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -49,6 +49,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -44,6 +44,5 @@
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -42,6 +42,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -1,3 +0,0 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -1,43 +0,0 @@
# xo-server-sdn-controller [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
XO Server plugin that allows the creation of pool-wide private networks.
## Install
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).
## Usage
### Network creation
In the network creation view, select a `pool` and `Private network`.
Create the network.
A choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, then port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, in `/etc/sysconfig/iptables` of all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
### Configuration
Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).
The plugin's configuration contains:
- `cert-dir`: A path where to find the certificates to create SSL connections with the hosts.
If none is provided, the plugin will create its own self-signed certificates.
- `override-certs:` Whether or not to uninstall an already existing SDN controller CA certificate in order to replace it by the plugin's one.
## Contributions
Contributions are *very* welcome, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
AGPL3 © [Vates SAS](http://vates.fr)

View File

@@ -1,35 +0,0 @@
{
"name": "xo-server-sdn-controller",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-sdn-controller",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-sdn-controller",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"main": "./dist",
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"version": "0.1.0",
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.4",
"@babel/preset-env": "^7.4.4",
"cross-env": "^5.2.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.1.4",
"lodash": "^4.17.11",
"node-openssl-cert": "^0.0.81",
"promise-toolbox": "^0.13.0"
},
"private": true
}

View File

@@ -1,830 +0,0 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import NodeOpenssl from 'node-openssl-cert'
import { access, constants, readFile, writeFile } from 'fs'
import { EventEmitter } from 'events'
import { filter, find, forOwn, map } from 'lodash'
import { fromCallback, fromEvent } from 'promise-toolbox'
import { join } from 'path'
import { OvsdbClient } from './ovsdb-client'
// =============================================================================
const log = createLogger('xo:xo-server:sdn-controller')
const PROTOCOL = 'pssl'
const CA_CERT = 'ca-cert.pem'
const CLIENT_KEY = 'client-key.pem'
const CLIENT_CERT = 'client-cert.pem'
const SDN_CONTROLLER_CERT = 'sdn-controller-ca.pem'
const NB_DAYS = 9999
// =============================================================================
export const configurationSchema = {
type: 'object',
properties: {
'cert-dir': {
description: `Full path to a directory where to find: \`client-cert.pem\`,
\`client-key.pem\` and \`ca-cert.pem\` to create ssl connections with hosts.
If none is provided, the plugin will create its own self-signed certificates.`,
type: 'string',
},
'override-certs': {
description: `Replace already existing SDN controller CA certificate`,
type: 'boolean',
default: false,
},
},
}
// =============================================================================
// Write `data` to `path` (promisified fs.writeFile), logging on success.
function fileWrite(path, data) {
  return fromCallback(writeFile, path, data).then(() => {
    log.debug(`${path} successfully written`)
  })
}
// Read `path` (promisified fs.readFile); resolves with the file contents.
async function fileRead(path) {
  return fromCallback(readFile, path)
}
// Check for the existence of `path` (F_OK access). Resolves to true/false;
// any error other than ENOENT is re-thrown.
async function fileExists(path) {
  try {
    await fromCallback(access, path, constants.F_OK)
    return true
  } catch (error) {
    if (error.code !== 'ENOENT') {
      throw error
    }
    return false
  }
}
// =============================================================================
class SDNController extends EventEmitter {
constructor({ xo, getDataDir }) {
super()
this._xo = xo
this._getDataDir = getDataDir
this._clientKey = null
this._clientCert = null
this._caCert = null
this._poolNetworks = []
this._ovsdbClients = []
this._newHosts = []
this._networks = new Map()
this._starCenters = new Map()
this._cleaners = []
this._objectsAdded = this._objectsAdded.bind(this)
this._objectsUpdated = this._objectsUpdated.bind(this)
this._overrideCerts = false
this._unsetApiMethod = null
}
// ---------------------------------------------------------------------------
async configure(configuration) {
this._overrideCerts = configuration['override-certs']
let certDirectory = configuration['cert-dir']
if (certDirectory == null) {
log.debug(`No cert-dir provided, using default self-signed certificates`)
certDirectory = await this._getDataDir()
if (!(await fileExists(join(certDirectory, CA_CERT)))) {
// If one certificate doesn't exist, none should
assert(
!(await fileExists(join(certDirectory, CLIENT_KEY))),
`${CLIENT_KEY} should not exist`
)
assert(
!(await fileExists(join(certDirectory, CLIENT_CERT))),
`${CLIENT_CERT} should not exist`
)
log.debug(`No default self-signed certificates exists, creating them`)
await this._generateCertificatesAndKey(certDirectory)
}
}
// TODO: verify certificates and create new certificates if needed
;[this._clientKey, this._clientCert, this._caCert] = await Promise.all([
fileRead(join(certDirectory, CLIENT_KEY)),
fileRead(join(certDirectory, CLIENT_CERT)),
fileRead(join(certDirectory, CA_CERT)),
])
this._ovsdbClients.forEach(client => {
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
})
const updatedPools = []
for (let i = 0; i < this._poolNetworks.length; ++i) {
const poolNetwork = this._poolNetworks[i]
if (updatedPools.includes(poolNetwork.pool)) {
continue
}
const xapi = this._xo.getXapi(poolNetwork.pool)
await this._installCaCertificateIfNeeded(xapi)
updatedPools.push(poolNetwork.pool)
}
}
async load() {
const createPrivateNetwork = this._createPrivateNetwork.bind(this)
createPrivateNetwork.description =
'Creates a pool-wide private network on a selected pool'
createPrivateNetwork.params = {
poolId: { type: 'string' },
networkName: { type: 'string' },
networkDescription: { type: 'string' },
encapsulation: { type: 'string' },
}
createPrivateNetwork.resolve = {
xoPool: ['poolId', 'pool', ''],
}
this._unsetApiMethod = this._xo.addApiMethod(
'plugin.SDNController.createPrivateNetwork',
createPrivateNetwork
)
// FIXME: we should monitor when xapis are added/removed
forOwn(this._xo.getAllXapis(), async xapi => {
await xapi.objectsFetched
if (this._setControllerNeeded(xapi) === false) {
this._cleaners.push(await this._manageXapi(xapi))
const hosts = filter(xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
this._createOvsdbClient(host)
})
)
// Add already existing pool-wide private networks
const networks = filter(xapi.objects.all, { $type: 'network' })
forOwn(networks, async network => {
if (network.other_config.private_pool_wide === 'true') {
log.debug(
`Adding network: '${network.name_label}' for pool: '${
network.$pool.name_label
}' to managed networks`
)
const center = await this._electNewCenter(network, true)
this._poolNetworks.push({
pool: network.$pool.$ref,
network: network.$ref,
starCenter: center ? center.$ref : null,
})
this._networks.set(network.$id, network.$ref)
if (center != null) {
this._starCenters.set(center.$id, center.$ref)
}
}
})
}
})
}
async unload() {
this._ovsdbClients = []
this._poolNetworks = []
this._newHosts = []
this._networks.clear()
this._starCenters.clear()
this._cleaners.forEach(cleaner => cleaner())
this._cleaners = []
this._unsetApiMethod()
}
// ===========================================================================
async _createPrivateNetwork({
xoPool,
networkName,
networkDescription,
encapsulation,
}) {
const pool = this._xo.getXapiObject(xoPool)
await this._setPoolControllerIfNeeded(pool)
// Create the private network
const privateNetworkRef = await pool.$xapi.call('network.create', {
name_label: networkName,
name_description: networkDescription,
MTU: 0,
other_config: {
automatic: 'false',
private_pool_wide: 'true',
encapsulation: encapsulation,
},
})
const privateNetwork = await pool.$xapi._getOrWaitObject(privateNetworkRef)
log.info(
`Private network '${
privateNetwork.name_label
}' has been created for pool '${pool.name_label}'`
)
// For each pool's host, create a tunnel to the private network
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
await this._createTunnel(host, privateNetwork)
this._createOvsdbClient(host)
})
)
const center = await this._electNewCenter(privateNetwork, false)
this._poolNetworks.push({
pool: pool.$ref,
network: privateNetwork.$ref,
starCenter: center ? center.$ref : null,
encapsulation: encapsulation,
})
this._networks.set(privateNetwork.$id, privateNetwork.$ref)
if (center != null) {
this._starCenters.set(center.$id, center.$ref)
}
}
// ---------------------------------------------------------------------------
async _manageXapi(xapi) {
const { objects } = xapi
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
objects.on('add', this._objectsAdded)
objects.on('update', this._objectsUpdated)
objects.on('remove', objectsRemovedXapi)
await this._installCaCertificateIfNeeded(xapi)
return () => {
objects.removeListener('add', this._objectsAdded)
objects.removeListener('update', this._objectsUpdated)
objects.removeListener('remove', objectsRemovedXapi)
}
}
async _objectsAdded(objects) {
await Promise.all(
map(objects, async object => {
const { $type } = object
if ($type === 'host') {
log.debug(
`New host: '${object.name_label}' in pool: '${
object.$pool.name_label
}'`
)
if (find(this._newHosts, { $ref: object.$ref }) == null) {
this._newHosts.push(object)
}
this._createOvsdbClient(object)
}
})
)
}
async _objectsUpdated(objects) {
await Promise.all(
map(objects, async (object, id) => {
const { $type } = object
if ($type === 'PIF') {
await this._pifUpdated(object)
} else if ($type === 'host') {
await this._hostUpdated(object)
}
})
)
}
async _objectsRemoved(xapi, objects) {
await Promise.all(
map(objects, async (object, id) => {
const client = find(this._ovsdbClients, { id: id })
if (client != null) {
this._ovsdbClients.splice(this._ovsdbClients.indexOf(client), 1)
}
// If a Star center host is removed: re-elect a new center where needed
const starCenterRef = this._starCenters.get(id)
if (starCenterRef != null) {
this._starCenters.delete(id)
const poolNetworks = filter(this._poolNetworks, {
starCenter: starCenterRef,
})
for (let i = 0; i < poolNetworks.length; ++i) {
const poolNetwork = poolNetworks[i]
const network = await xapi._getOrWaitObject(poolNetwork.network)
const newCenter = await this._electNewCenter(network, true)
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
}
return
}
// If a network is removed, clean this._poolNetworks from it
const networkRef = this._networks.get(id)
if (networkRef != null) {
this._networks.delete(id)
const poolNetwork = find(this._poolNetworks, {
network: networkRef,
})
if (poolNetwork != null) {
this._poolNetworks.splice(
this._poolNetworks.indexOf(poolNetwork),
1
)
}
}
})
)
}
async _pifUpdated(pif) {
// Only if PIF is in a private network
const poolNetwork = find(this._poolNetworks, { network: pif.network })
if (poolNetwork == null) {
return
}
if (!pif.currently_attached) {
if (poolNetwork.starCenter !== pif.host) {
return
}
log.debug(
`PIF: '${pif.device}' of network: '${
pif.$network.name_label
}' star-center host: '${
pif.$host.name_label
}' has been unplugged, electing a new host`
)
const newCenter = await this._electNewCenter(pif.$network, true)
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
this._starCenters.delete(pif.$host.$id)
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
} else {
if (poolNetwork.starCenter == null) {
const host = pif.$host
log.debug(
`First available host: '${
host.name_label
}' becomes star center of network: '${pif.$network.name_label}'`
)
poolNetwork.starCenter = pif.host
this._starCenters.set(host.$id, host.$ref)
}
log.debug(
`PIF: '${pif.device}' of network: '${pif.$network.name_label}' host: '${
pif.$host.name_label
}' has been plugged`
)
const starCenter = await pif.$xapi._getOrWaitObject(
poolNetwork.starCenter
)
await this._addHostToNetwork(pif.$host, pif.$network, starCenter)
}
}
async _hostUpdated(host) {
const xapi = host.$xapi
if (host.enabled) {
if (host.PIFs.length === 0) {
return
}
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
const newHost = find(this._newHosts, { $ref: host.$ref })
if (newHost != null) {
this._newHosts.splice(this._newHosts.indexOf(newHost), 1)
try {
await xapi.call('pool.certificate_sync')
} catch (error) {
log.error(
`Couldn't sync SDN controller ca certificate in pool: '${
host.$pool.name_label
}' because: ${error}`
)
}
}
for (let i = 0; i < tunnels.length; ++i) {
const tunnel = tunnels[i]
const accessPIF = await xapi._getOrWaitObject(tunnel.access_PIF)
if (accessPIF.host !== host.$ref) {
continue
}
const poolNetwork = find(this._poolNetworks, {
network: accessPIF.network,
})
if (poolNetwork == null) {
continue
}
if (accessPIF.currently_attached) {
continue
}
log.debug(
`Pluging PIF: '${accessPIF.device}' for host: '${
host.name_label
}' on network: '${accessPIF.$network.name_label}'`
)
try {
await xapi.call('PIF.plug', accessPIF.$ref)
} catch (error) {
log.error(
`XAPI error while pluging PIF: '${accessPIF.device}' on host: '${
host.name_label
}' for network: '${accessPIF.$network.name_label}'`
)
}
const starCenter = await host.$xapi._getOrWaitObject(
poolNetwork.starCenter
)
await this._addHostToNetwork(host, accessPIF.$network, starCenter)
}
} else {
const poolNetworks = filter(this._poolNetworks, { starCenter: host.$ref })
for (let i = 0; i < poolNetworks.length; ++i) {
const poolNetwork = poolNetworks[i]
const network = await host.$xapi._getOrWaitObject(poolNetwork.network)
log.debug(
`Star center host: '${host.name_label}' of network: '${
network.name_label
}' in pool: '${
host.$pool.name_label
}' is no longer reachable, electing a new host`
)
const newCenter = await this._electNewCenter(network, true)
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
this._starCenters.delete(host.$id)
if (newCenter != null) {
this._starCenters.set(newCenter.$id, newCenter.$ref)
}
}
}
}
// ---------------------------------------------------------------------------
async _setPoolControllerIfNeeded(pool) {
if (!this._setControllerNeeded(pool.$xapi)) {
// Nothing to do
return
}
const controller = find(pool.$xapi.objects.all, { $type: 'SDN_controller' })
if (controller != null) {
await pool.$xapi.call('SDN_controller.forget', controller.$ref)
log.debug(`Remove old SDN controller from pool: '${pool.name_label}'`)
}
await pool.$xapi.call('SDN_controller.introduce', PROTOCOL)
log.debug(`Set SDN controller of pool: '${pool.name_label}'`)
this._cleaners.push(await this._manageXapi(pool.$xapi))
}
_setControllerNeeded(xapi) {
const controller = find(xapi.objects.all, { $type: 'SDN_controller' })
return !(
controller != null &&
controller.protocol === PROTOCOL &&
controller.address === '' &&
controller.port === 0
)
}
// ---------------------------------------------------------------------------
async _installCaCertificateIfNeeded(xapi) {
let needInstall = false
try {
const result = await xapi.call('pool.certificate_list')
if (!result.includes(SDN_CONTROLLER_CERT)) {
needInstall = true
} else if (this._overrideCerts) {
await xapi.call('pool.certificate_uninstall', SDN_CONTROLLER_CERT)
log.debug(
`Old SDN Controller CA certificate uninstalled on pool: '${
xapi.pool.name_label
}'`
)
needInstall = true
}
} catch (error) {
log.error(
`Couldn't retrieve certificate list of pool: '${xapi.pool.name_label}'`
)
}
if (!needInstall) {
return
}
try {
await xapi.call(
'pool.certificate_install',
SDN_CONTROLLER_CERT,
this._caCert.toString()
)
await xapi.call('pool.certificate_sync')
log.debug(
`SDN controller CA certificate install in pool: '${
xapi.pool.name_label
}'`
)
} catch (error) {
log.error(
`Couldn't install SDN controller CA certificate in pool: '${
xapi.pool.name_label
}' because: ${error}`
)
}
}
// ---------------------------------------------------------------------------
async _electNewCenter(network, resetNeeded) {
const pool = network.$pool
let newCenter = null
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
await Promise.all(
map(hosts, async host => {
if (resetNeeded) {
// Clean old ports and interfaces
const hostClient = find(this._ovsdbClients, { host: host.$ref })
if (hostClient != null) {
try {
await hostClient.resetForNetwork(network.uuid, network.name_label)
} catch (error) {
log.error(
`Couldn't reset network: '${network.name_label}' for host: '${
host.name_label
}' in pool: '${network.$pool.name_label}' because: ${error}`
)
return
}
}
}
if (newCenter != null) {
return
}
const pif = find(host.$PIFs, { network: network.$ref })
if (pif != null && pif.currently_attached && host.enabled) {
newCenter = host
}
})
)
if (newCenter == null) {
log.error(
`Unable to elect a new star-center host to network: '${
network.name_label
}' for pool: '${
network.$pool.name_label
}' because there's no available host`
)
return null
}
// Recreate star topology
await Promise.all(
await map(hosts, async host => {
await this._addHostToNetwork(host, network, newCenter)
})
)
log.info(
`New star center host elected: '${newCenter.name_label}' in network: '${
network.name_label
}'`
)
return newCenter
}
async _createTunnel(host, network) {
const pif = find(host.$PIFs, { physical: true })
if (pif == null) {
log.error(
`No PIF found to create tunnel on host: '${
host.name_label
}' for network: '${network.name_label}'`
)
return
}
await host.$xapi.call('tunnel.create', pif.$ref, network.$ref)
log.debug(
`Tunnel added on host '${host.name_label}' for network '${
network.name_label
}'`
)
}
async _addHostToNetwork(host, network, starCenter) {
if (host.$ref === starCenter.$ref) {
// Nothing to do
return
}
const hostClient = find(this._ovsdbClients, {
host: host.$ref,
})
if (hostClient == null) {
log.error(`No OVSDB client found for host: '${host.name_label}'`)
return
}
const starCenterClient = find(this._ovsdbClients, {
host: starCenter.$ref,
})
if (starCenterClient == null) {
log.error(
`No OVSDB client found for star-center host: '${starCenter.name_label}'`
)
return
}
const encapsulation =
network.other_config.encapsulation != null
? network.other_config.encapsulation
: 'gre'
try {
await hostClient.addInterfaceAndPort(
network.uuid,
network.name_label,
starCenterClient.address,
encapsulation
)
await starCenterClient.addInterfaceAndPort(
network.uuid,
network.name_label,
hostClient.address,
encapsulation
)
} catch (error) {
log.error(
`Couldn't add host: '${host.name_label}' to network: '${
network.name_label
}' in pool: '${host.$pool.name_label}' because: ${error}`
)
}
}
// ---------------------------------------------------------------------------
_createOvsdbClient(host) {
const foundClient = find(this._ovsdbClients, { host: host.$ref })
if (foundClient != null) {
return foundClient
}
const client = new OvsdbClient(
host,
this._clientKey,
this._clientCert,
this._caCert
)
this._ovsdbClients.push(client)
return client
}
// ---------------------------------------------------------------------------
async _generateCertificatesAndKey(dataDir) {
const openssl = new NodeOpenssl()
const rsakeyoptions = {
rsa_keygen_bits: 4096,
format: 'PKCS8',
}
const subject = {
countryName: 'XX',
localityName: 'Default City',
organizationName: 'Default Company LTD',
}
const csroptions = {
hash: 'sha256',
startdate: new Date('1984-02-04 00:00:00'),
enddate: new Date('2143-06-04 04:16:23'),
subject: subject,
}
const cacsroptions = {
hash: 'sha256',
days: NB_DAYS,
subject: subject,
}
openssl.generateRSAPrivateKey(rsakeyoptions, (err, cakey, cmd) => {
if (err) {
log.error(`Error while generating CA private key: ${err}`)
return
}
openssl.generateCSR(cacsroptions, cakey, null, (err, csr, cmd) => {
if (err) {
log.error(`Error while generating CA certificate: ${err}`)
return
}
openssl.selfSignCSR(
csr,
cacsroptions,
cakey,
null,
async (err, cacrt, cmd) => {
if (err) {
log.error(`Error while signing CA certificate: ${err}`)
return
}
await fileWrite(join(dataDir, CA_CERT), cacrt)
openssl.generateRSAPrivateKey(
rsakeyoptions,
async (err, key, cmd) => {
if (err) {
log.error(`Error while generating private key: ${err}`)
return
}
await fileWrite(join(dataDir, CLIENT_KEY), key)
openssl.generateCSR(csroptions, key, null, (err, csr, cmd) => {
if (err) {
log.error(`Error while generating certificate: ${err}`)
return
}
openssl.CASignCSR(
csr,
cacsroptions,
false,
cacrt,
cakey,
null,
async (err, crt, cmd) => {
if (err) {
log.error(`Error while signing certificate: ${err}`)
return
}
await fileWrite(join(dataDir, CLIENT_CERT), crt)
this.emit('certWritten')
}
)
})
}
)
}
)
})
})
await fromEvent(this, 'certWritten', {})
log.debug('All certificates have been successfully written')
}
}
// Plugin factory: xo-server instantiates the controller with its options.
export default function (opts) {
  return new SDNController(opts)
}

View File

@@ -1,511 +0,0 @@
import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import forOwn from 'lodash/forOwn'
import fromEvent from 'promise-toolbox/fromEvent'
import { connect } from 'tls'
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
const OVSDB_PORT = 6640
// =============================================================================
export class OvsdbClient {
constructor(host, clientKey, clientCert, caCert) {
this._host = host
this._numberOfPortAndInterface = 0
this._requestID = 0
this.updateCertificates(clientKey, clientCert, caCert)
log.debug(`[${this._host.name_label}] New OVSDB client`)
}
// ---------------------------------------------------------------------------
get address() {
return this._host.address
}
get host() {
return this._host.$ref
}
get id() {
return this._host.$id
}
updateCertificates(clientKey, clientCert, caCert) {
this._clientKey = clientKey
this._clientCert = clientCert
this._caCert = caCert
log.debug(`[${this._host.name_label}] Certificates have been updated`)
}
// ---------------------------------------------------------------------------
async addInterfaceAndPort(
networkUuid,
networkName,
remoteAddress,
encapsulation
) {
const socket = await this._connect()
const index = this._numberOfPortAndInterface
++this._numberOfPortAndInterface
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid == null) {
socket.destroy()
return
}
const alreadyExist = await this._interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
)
if (alreadyExist) {
socket.destroy()
return
}
const interfaceName = 'tunnel_iface' + index
const portName = 'tunnel_port' + index
// Add interface and port to the bridge
const options = ['map', [['remote_ip', remoteAddress]]]
const addInterfaceOperation = {
op: 'insert',
table: 'Interface',
row: {
type: encapsulation,
options: options,
name: interfaceName,
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_iface',
}
const addPortOperation = {
op: 'insert',
table: 'Port',
row: {
name: portName,
interfaces: ['set', [['named-uuid', 'new_iface']]],
other_config: ['map', [['private_pool_wide', 'true']]],
},
'uuid-name': 'new_port',
}
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
}
const params = [
'Open_vSwitch',
addInterfaceOperation,
addPortOperation,
mutateBridgeOperation,
]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
socket.destroy()
return
}
let error
let details
let i = 0
let opResult
do {
opResult = jsonObjects[0].result[i]
if (opResult != null && opResult.error != null) {
error = opResult.error
details = opResult.details
}
++i
} while (opResult && !error)
if (error != null) {
log.error(
`[${
this._host.name_label
}] Error while adding port: '${portName}' and interface: '${interfaceName}' to bridge: '${bridgeName}' on network: '${networkName}' because: ${error}: ${details}`
)
socket.destroy()
return
}
log.debug(
`[${
this._host.name_label
}] Port: '${portName}' and interface: '${interfaceName}' added to bridge: '${bridgeName}' on network: '${networkName}'`
)
socket.destroy()
}
async resetForNetwork(networkUuid, networkName) {
const socket = await this._connect()
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
networkUuid,
networkName,
socket
)
if (bridgeUuid == null) {
socket.destroy()
return
}
// Delete old ports created by a SDN controller
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports == null) {
socket.destroy()
return
}
const portsToDelete = []
for (let i = 0; i < ports.length; ++i) {
const portUuid = ports[i][1]
const where = [['_uuid', '==', ['uuid', portUuid]]]
const selectResult = await this._select(
'Port',
['name', 'other_config'],
where,
socket
)
if (selectResult == null) {
continue
}
forOwn(selectResult.other_config[1], config => {
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
log.debug(
`[${this._host.name_label}] Adding port: '${
selectResult.name
}' to delete list from bridge: '${bridgeName}'`
)
portsToDelete.push(['uuid', portUuid])
}
})
}
if (portsToDelete.length === 0) {
// Nothing to do
socket.destroy()
return
}
const mutateBridgeOperation = {
op: 'mutate',
table: 'Bridge',
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
mutations: [['ports', 'delete', ['set', portsToDelete]]],
}
const params = ['Open_vSwitch', mutateBridgeOperation]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
socket.destroy()
return
}
if (jsonObjects[0].error != null) {
log.error(
`[${
this._host.name_label
}] Couldn't delete ports from bridge: '${bridgeName}' because: ${
jsonObjects.error
}`
)
socket.destroy()
return
}
log.debug(
`[${this._host.name_label}] Deleted ${
jsonObjects[0].result[0].count
} ports from bridge: '${bridgeName}'`
)
socket.destroy()
}
// ===========================================================================
_parseJson(chunk) {
let data = chunk.toString()
let buffer = ''
let depth = 0
let pos = 0
const objects = []
for (let i = pos; i < data.length; ++i) {
const c = data.charAt(i)
if (c === '{') {
depth++
} else if (c === '}') {
depth--
if (depth === 0) {
const object = JSON.parse(buffer + data.substr(0, i + 1))
objects.push(object)
buffer = ''
data = data.substr(i + 1)
pos = 0
i = -1
}
}
}
buffer += data
return objects
}
// ---------------------------------------------------------------------------
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
const where = [
[
'external_ids',
'includes',
['map', [['xs-network-uuids', networkUuid]]],
],
]
const selectResult = await this._select(
'Bridge',
['_uuid', 'name'],
where,
socket
)
if (selectResult == null) {
return [null, null]
}
const bridgeUuid = selectResult._uuid[1]
const bridgeName = selectResult.name
log.debug(
`[${
this._host.name_label
}] Found bridge: '${bridgeName}' for network: '${networkName}'`
)
return [bridgeUuid, bridgeName]
}
async _interfaceAndPortAlreadyExist(
bridgeUuid,
bridgeName,
remoteAddress,
socket
) {
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
if (ports == null) {
return
}
for (let i = 0; i < ports.length; ++i) {
const portUuid = ports[i][1]
const interfaces = await this._getPortInterfaces(portUuid, socket)
if (interfaces == null) {
continue
}
let j
for (j = 0; j < interfaces.length; ++j) {
const interfaceUuid = interfaces[j][1]
const hasRemote = await this._interfaceHasRemote(
interfaceUuid,
remoteAddress,
socket
)
if (hasRemote === true) {
return true
}
}
}
return false
}
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
const selectResult = await this._select('Bridge', ['ports'], where, socket)
if (selectResult == null) {
return null
}
return selectResult.ports[0] === 'set'
? selectResult.ports[1]
: [selectResult.ports]
}
async _getPortInterfaces(portUuid, socket) {
const where = [['_uuid', '==', ['uuid', portUuid]]]
const selectResult = await this._select(
'Port',
['name', 'interfaces'],
where,
socket
)
if (selectResult == null) {
return null
}
return selectResult.interfaces[0] === 'set'
? selectResult.interfaces[1]
: [selectResult.interfaces]
}
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
const where = [['_uuid', '==', ['uuid', interfaceUuid]]]
const selectResult = await this._select(
'Interface',
['name', 'options'],
where,
socket
)
if (selectResult == null) {
return false
}
for (let i = 0; i < selectResult.options[1].length; ++i) {
const option = selectResult.options[1][i]
if (option[0] === 'remote_ip' && option[1] === remoteAddress) {
return true
}
}
return false
}
// ---------------------------------------------------------------------------
async _select(table, columns, where, socket) {
const selectOperation = {
op: 'select',
table: table,
columns: columns,
where: where,
}
const params = ['Open_vSwitch', selectOperation]
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
if (jsonObjects == null) {
return
}
const jsonResult = jsonObjects[0].result[0]
if (jsonResult.error != null) {
log.error(
`[${
this._host.name_label
}] Couldn't retrieve: '${columns}' in: '${table}' because: ${
jsonResult.error
}: ${jsonResult.details}`
)
return null
}
if (jsonResult.rows.length === 0) {
log.error(
`[${
this._host.name_label
}] No '${columns}' found in: '${table}' where: '${where}'`
)
return null
}
// For now all select operations should return only 1 row
assert(
jsonResult.rows.length === 1,
`[${
this._host.name_label
}] There should exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
)
return jsonResult.rows[0]
}
async _sendOvsdbTransaction(params, socket) {
const stream = socket
const requestId = this._requestID
++this._requestID
const req = {
id: requestId,
method: 'transact',
params: params,
}
try {
stream.write(JSON.stringify(req))
} catch (error) {
log.error(
`[${this._host.name_label}] Error while writing into stream: ${error}`
)
return null
}
let result
let jsonObjects
let resultRequestId
do {
try {
result = await fromEvent(stream, 'data', {})
} catch (error) {
log.error(
`[${
this._host.name_label
}] Error while waiting for stream data: ${error}`
)
return null
}
jsonObjects = this._parseJson(result)
resultRequestId = jsonObjects[0].id
} while (resultRequestId !== requestId)
return jsonObjects
}
// ---------------------------------------------------------------------------
async _connect() {
const options = {
ca: this._caCert,
key: this._clientKey,
cert: this._clientCert,
host: this._host.address,
port: OVSDB_PORT,
rejectUnauthorized: false,
requestCert: false,
}
const socket = connect(options)
try {
await fromEvent(socket, 'secureConnect', {})
} catch (error) {
log.error(
`[${this._host.name_label}] TLS connection failed because: ${error}: ${
error.code
}`
)
throw error
}
log.debug(`[${this._host.name_label}] TLS connection successful`)
socket.on('error', error => {
log.error(
`[${
this._host.name_label
}] OVSDB client socket error: ${error} with code: ${error.code}`
)
})
return socket
}
}

View File

@@ -32,7 +32,7 @@
"node": ">=6"
},
"dependencies": {
"nodemailer": "^6.1.0",
"nodemailer": "^5.0.0",
"nodemailer-markdown": "^1.0.1",
"promise-toolbox": "^0.12.1"
},
@@ -50,6 +50,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -49,6 +49,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -50,6 +50,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -39,7 +39,7 @@
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/log": "^0.1.4",
"handlebars": "^4.0.6",
"html-minifier": "^4.0.0",
"html-minifier": "^3.5.8",
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.12.1"
@@ -59,6 +59,5 @@
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"private": true
}
}

View File

@@ -49,17 +49,6 @@ maxTokenValidity = '0.5 year'
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'
# Duration for which we can wait for the backup size before returning
#
# It should be short to avoid blocking the display of the available backups.
vmBackupSizeTimeout = '2 seconds'
# Helmet handles HTTP security via headers
#
# https://helmetjs.github.io/docs/
#[http.helmet.hsts]
#includeSubDomains = false
[[http.listen]]
port = 80
@@ -79,7 +68,6 @@ honorCipherOrder = true
secureOptions = 117440512
[http.mounts]
'/' = '../xo-web/dist'
[remoteOptions]
mountsDir = '/run/xo-server/mounts'

View File

@@ -1,7 +1,6 @@
{
"private": true,
"name": "xo-server",
"version": "5.43.0",
"version": "5.38.2",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -38,11 +37,11 @@
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.9.0",
"@xen-orchestra/fs": "^0.8.0",
"@xen-orchestra/log": "^0.1.4",
"@xen-orchestra/mixin": "^0.0.0",
"ajv": "^6.1.1",
"app-conf": "^0.7.0",
"app-conf": "^0.6.1",
"archiver": "^3.0.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^3.0.0",
@@ -51,7 +50,7 @@
"body-parser": "^1.18.2",
"compression": "^1.7.3",
"connect-flash": "^0.1.1",
"cookie": "^0.4.0",
"cookie": "^0.3.1",
"cookie-parser": "^1.4.3",
"d3-time-format": "^2.1.1",
"debug": "^4.0.1",
@@ -65,7 +64,7 @@
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^8.0.1",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.7.1",
@@ -110,7 +109,7 @@
"readable-stream": "^3.2.0",
"redis": "^2.8.0",
"schema-inspector": "^1.6.8",
"semver": "^6.0.0",
"semver": "^5.4.1",
"serve-static": "^1.13.1",
"split-lines": "^2.0.0",
"stack-chain": "^2.0.0",
@@ -118,18 +117,18 @@
"struct-fu": "^1.2.0",
"tar-stream": "^2.0.1",
"through2": "^3.0.0",
"tmp": "^0.1.0",
"tmp": "^0.0.33",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.7.0",
"vhd-lib": "^0.6.0",
"ws": "^6.0.0",
"xen-api": "^0.26.0",
"xen-api": "^0.25.1",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.4.1",
"xo-collection": "^0.4.1",
"xo-common": "^0.2.0",
"xo-remote-parser": "^0.5.0",
"xo-vmdk-to-vhd": "^0.1.7",
"xo-vmdk-to-vhd": "^0.1.6",
"yazl": "^2.4.3"
},
"devDependencies": {

View File

@@ -117,7 +117,7 @@ port = 80
# List of files/directories which will be served.
[http.mounts]
#'/any/url' = '/path/to/directory'
#'/' = '/path/to/xo-web/dist/'
# List of proxied URLs (HTTP & WebSockets).
[http.proxies]

View File

@@ -1,5 +0,0 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { execFile } from 'child_process'
export const read = key =>
fromCallback(cb => execFile('xenstore-read', [key], cb))

View File

@@ -183,7 +183,6 @@ getLogs.params = {
after: { type: ['number', 'string'], optional: true },
before: { type: ['number', 'string'], optional: true },
limit: { type: 'number', optional: true },
'*': { type: 'any' },
}
// -----------------------------------------------------------------------------

View File

@@ -1,6 +1,6 @@
import createLogger from '@xen-orchestra/log'
import pump from 'pump'
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors'
import { parseSize } from '../utils'
@@ -128,7 +128,7 @@ async function handleImportContent(req, res, { xapi, id }) {
res.end(format.response(0, true))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}

View File

@@ -1,22 +1,26 @@
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
// ===================================================================
export async function set({
host,
multipathing,
// TODO: use camel case.
name_label: nameLabel,
name_description: nameDescription,
}) {
host = this.getXapiObject(host)
const xapi = this.getXapi(host)
const hostId = host._xapiId
await Promise.all([
nameDescription !== undefined && host.set_name_description(nameDescription),
nameLabel !== undefined && host.set_name_label(nameLabel),
multipathing !== undefined &&
host.$xapi.setHostMultipathing(host.$id, multipathing),
])
if (multipathing !== undefined) {
await xapi.setHostMultipathing(hostId, multipathing)
}
return xapi.setHostProperties(hostId, {
nameLabel,
nameDescription,
})
}
set.description = 'changes the properties of an host'
@@ -211,25 +215,6 @@ emergencyShutdownHost.resolve = {
// -------------------------------------------------------------------
export async function isHostServerTimeConsistent({ host }) {
try {
await this.getXapi(host).assertConsistentHostServerTime(host._xapiRef)
return true
} catch (e) {
return false
}
}
isHostServerTimeConsistent.params = {
host: { type: 'string' },
}
isHostServerTimeConsistent.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export function stats({ host, granularity }) {
return this.getXapiHostStats(host._xapiId, granularity)
}
@@ -263,7 +248,7 @@ async function handleInstallSupplementalPack(req, res, { hostId }) {
res.end(format.response(0))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}
@@ -284,19 +269,3 @@ installSupplementalPack.params = {
installSupplementalPack.resolve = {
host: ['host', 'host', 'admin'],
}
// -------------------------------------------------------------------
export function isHyperThreadingEnabled({ host }) {
return this.getXapi(host).isHyperThreadingEnabled(host._xapiId)
}
isHyperThreadingEnabled.description = 'get hyper-threading information'
isHyperThreadingEnabled.params = {
id: { type: 'string' },
}
isHyperThreadingEnabled.resolve = {
host: ['id', 'host', 'administrate'],
}

View File

@@ -85,26 +85,18 @@ createBonded.description =
// ===================================================================
export async function set({
network,
automatic,
defaultIsLocked,
name_description: nameDescription,
name_label: nameLabel,
network,
}) {
network = this.getXapiObject(network)
await Promise.all([
automatic !== undefined &&
network.update_other_config('automatic', automatic ? 'true' : null),
defaultIsLocked !== undefined &&
network.set_default_locking_mode(
defaultIsLocked ? 'disabled' : 'unlocked'
),
nameDescription !== undefined &&
network.set_name_description(nameDescription),
nameLabel !== undefined && network.set_name_label(nameLabel),
])
await this.getXapi(network).setNetworkProperties(network._xapiId, {
automatic,
defaultIsLocked,
nameDescription,
nameLabel,
})
}
set.params = {

View File

@@ -5,7 +5,7 @@
async function delete_({ PBD }) {
// TODO: check if PBD is attached before
await this.getXapi(PBD).callAsync('PBD.destroy', PBD._xapiRef)
await this.getXapi(PBD).call('PBD.destroy', PBD._xapiRef)
}
export { delete_ as delete }
@@ -37,7 +37,7 @@ disconnect.resolve = {
export async function connect({ PBD }) {
// TODO: check if PBD is attached before
await this.getXapi(PBD).callAsync('PBD.plug', PBD._xapiRef)
await this.getXapi(PBD).call('PBD.plug', PBD._xapiRef)
}
connect.params = {

View File

@@ -1,7 +1,5 @@
// TODO: too low level, move into host.
import { filter, find } from 'lodash'
import { IPV4_CONFIG_MODES, IPV6_CONFIG_MODES } from '../xapi'
export function getIpv4ConfigurationModes() {
@@ -17,17 +15,7 @@ export function getIpv6ConfigurationModes() {
async function delete_({ pif }) {
// TODO: check if PIF is attached before
const xapi = this.getXapi(pif)
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
const tunnel = find(tunnels, { access_PIF: pif._xapiRef })
if (tunnel != null) {
await xapi.callAsync('PIF.unplug', pif._xapiRef)
await xapi.callAsync('tunnel.destroy', tunnel.$ref)
return
}
await xapi.callAsync('PIF.destroy', pif._xapiRef)
await this.getXapi(pif).call('PIF.destroy', pif._xapiRef)
}
export { delete_ as delete }
@@ -44,7 +32,7 @@ delete_.resolve = {
export async function disconnect({ pif }) {
// TODO: check if PIF is attached before
await this.getXapi(pif).callAsync('PIF.unplug', pif._xapiRef)
await this.getXapi(pif).call('PIF.unplug', pif._xapiRef)
}
disconnect.params = {
@@ -59,7 +47,7 @@ disconnect.resolve = {
export async function connect({ pif }) {
// TODO: check if PIF is attached before
await this.getXapi(pif).callAsync('PIF.plug', pif._xapiRef)
await this.getXapi(pif).call('PIF.plug', pif._xapiRef)
}
connect.params = {

View File

@@ -1,19 +1,18 @@
import { format, JsonRPcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
// ===================================================================
export async function set({
pool,
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel,
}) {
pool = this.getXapiObject(pool)
await Promise.all([
nameDescription !== undefined && pool.set_name_description(nameDescription),
nameLabel !== undefined && pool.set_name_label(nameLabel),
])
await this.getXapi(pool).setPoolProperties({
nameDescription,
nameLabel,
})
}
set.params = {
@@ -235,7 +234,7 @@ async function handleInstallSupplementalPack(req, res, { poolId }) {
res.end(format.response(0))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRPcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}

View File

@@ -10,15 +10,14 @@ import { forEach, parseXml } from '../utils'
export async function set({
sr,
// TODO: use camel case.
name_description: nameDescription,
name_label: nameLabel,
}) {
sr = this.getXapiObject(sr)
await Promise.all([
nameDescription !== undefined && sr.set_name_description(nameDescription),
nameLabel !== undefined && sr.set_name_label(nameLabel),
])
await this.getXapi(sr).setSrProperties(sr._xapiId, {
nameDescription,
nameLabel,
})
}
set.params = {
@@ -36,7 +35,7 @@ set.resolve = {
// -------------------------------------------------------------------
export async function scan({ SR }) {
await this.getXapi(SR).callAsync('SR.scan', SR._xapiRef)
await this.getXapi(SR).call('SR.scan', SR._xapiRef)
}
scan.params = {
@@ -180,35 +179,6 @@ createIso.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
export async function createFile({
host,
nameLabel,
nameDescription,
location,
}) {
const xapi = this.getXapi(host)
return xapi.createSr({
hostRef: host._xapiRef,
name_label: nameLabel,
name_description: nameDescription,
type: 'file',
device_config: { location },
})
}
createFile.params = {
host: { type: 'string' },
nameLabel: { type: 'string' },
nameDescription: { type: 'string' },
location: { type: 'string' },
}
createFile.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// NFS SR
@@ -391,58 +361,6 @@ createExt.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect all ZFS pools
// Return a dict of pools with their parameters { <poolname>: {<paramdict>}}
// example output (the parameter mountpoint is of interest):
// {"tank":
// {
// "setuid": "on", "relatime": "off", "referenced": "24K", "written": "24K", "zoned": "off", "primarycache": "all",
// "logbias": "latency", "creation": "Mon May 27 17:24 2019", "sync": "standard", "snapdev": "hidden",
// "dedup": "off", "sharenfs": "off", "usedbyrefreservation": "0B", "sharesmb": "off", "createtxg": "1",
// "canmount": "on", "mountpoint": "/tank", "casesensitivity": "sensitive", "utf8only": "off", "xattr": "on",
// "dnodesize": "legacy", "mlslabel": "none", "objsetid": "54", "defcontext": "none", "rootcontext": "none",
// "mounted": "yes", "compression": "off", "overlay": "off", "logicalused": "47K", "usedbysnapshots": "0B",
// "filesystem_count": "none", "copies": "1", "snapshot_limit": "none", "aclinherit": "restricted",
// "compressratio": "1.00x", "readonly": "off", "version": "5", "normalization": "none", "filesystem_limit": "none",
// "type": "filesystem", "secondarycache": "all", "refreservation": "none", "available": "17.4G", "used": "129K",
// "exec": "on", "refquota": "none", "refcompressratio": "1.00x", "quota": "none", "keylocation": "none",
// "snapshot_count": "none", "fscontext": "none", "vscan": "off", "reservation": "none", "atime": "on",
// "recordsize": "128K", "usedbychildren": "105K", "usedbydataset": "24K", "guid": "656061077639704004",
// "pbkdf2iters": "0", "checksum": "on", "special_small_blocks": "0", "redundant_metadata": "all",
// "volmode": "default", "devices": "on", "keyformat": "none", "logicalreferenced": "12K", "acltype": "off",
// "nbmand": "off", "context": "none", "encryption": "off", "snapdir": "hidden"}}
export async function probeZfs({ host }) {
const xapi = this.getXapi(host)
try {
const result = await xapi.call(
'host.call_plugin',
host._xapiRef,
'zfs.py',
'list_zfs_pools',
{}
)
return JSON.parse(result)
} catch (error) {
if (
error.code === 'XENAPI_MISSING_PLUGIN' ||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
) {
return {}
} else {
throw error
}
}
}
probeZfs.params = {
host: { type: 'string' },
}
probeZfs.resolve = {
host: ['host', 'host', 'administrate'],
}
// -------------------------------------------------------------------
// This function helps to detect all NFS shares (exports) on a NFS server
// Return a table of exports with their paths and ACLs

View File

@@ -1,5 +1,5 @@
export async function add({ tag, object }) {
await this.getXapiObject(object).add_tags(tag)
await this.getXapi(object).addTag(object._xapiId, tag)
}
add.description = 'add a new tag to an object'
@@ -16,7 +16,7 @@ add.params = {
// -------------------------------------------------------------------
export async function remove({ tag, object }) {
await this.getXapiObject(object).remove_tags(tag)
await this.getXapi(object).removeTag(object._xapiId, tag)
}
remove.description = 'remove an existing tag from an object'

View File

@@ -34,25 +34,3 @@ delete_.permission = 'admin'
delete_.params = {
token: { type: 'string' },
}
// -------------------------------------------------------------------
export async function deleteAll({ except }) {
await this.deleteAuthenticationTokens({
filter: {
user_id: this.session.get('user_id'),
id: {
__not: except,
},
},
})
}
deleteAll.description =
'delete all tokens of the current user except the current one'
deleteAll.permission = ''
deleteAll.params = {
except: { type: 'string', optional: true },
}

View File

@@ -48,7 +48,8 @@ connect.resolve = {
export async function set({ position, vbd }) {
if (position !== undefined) {
await this.getXapiObject(vbd).set_userdevice(String(position))
const xapi = this.getXapi(vbd)
await xapi.call('VBD.set_userdevice', vbd._xapiRef, String(position))
}
}
@@ -66,7 +67,9 @@ set.resolve = {
// -------------------------------------------------------------------
export async function setBootable({ vbd, bootable }) {
await this.getXapiObject(vbd).set_bootable(bootable)
const xapi = this.getXapi(vbd)
await xapi.call('VBD.set_bootable', vbd._xapiRef, bootable)
}
setBootable.params = {

View File

@@ -64,7 +64,6 @@ export async function set({
allowedIpv4Addresses,
allowedIpv6Addresses,
attached,
rateLimit,
}) {
const oldIpAddresses = vif.allowedIpv4Addresses.concat(
vif.allowedIpv6Addresses
@@ -92,9 +91,6 @@ export async function set({
mac,
currently_attached: attached,
ipv4_allowed: newIpAddresses,
qos_algorithm_type: rateLimit != null ? 'ratelimit' : undefined,
qos_algorithm_params:
rateLimit != null ? { kbps: String(rateLimit) } : undefined,
})
await this.allocIpAddresses(newVif.$id, newIpAddresses)
@@ -111,7 +107,6 @@ export async function set({
return this.getXapi(vif).editVif(vif._xapiId, {
ipv4Allowed: allowedIpv4Addresses,
ipv6Allowed: allowedIpv6Addresses,
rateLimit,
})
}
@@ -134,11 +129,6 @@ set.params = {
optional: true,
},
attached: { type: 'boolean', optional: true },
rateLimit: {
description: 'in kilobytes per seconds',
optional: true,
type: ['number', 'null'],
},
}
set.resolve = {

View File

@@ -1,5 +1,5 @@
import defer from 'golike-defer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { format } from 'json-rpc-peer'
import { ignoreErrors } from 'promise-toolbox'
import { assignWith, concat } from 'lodash'
import {
@@ -193,11 +193,6 @@ create.params = {
optional: true,
},
networkConfig: {
type: 'string',
optional: true,
},
coreOs: {
type: 'boolean',
optional: true,
@@ -320,11 +315,6 @@ create.params = {
},
},
},
hvmBootFirmware: { type: 'string', optional: true },
// other params are passed to `editVm`
'*': { type: 'any' },
}
create.resolve = {
@@ -565,8 +555,6 @@ set.params = {
// Identifier of the VM to update.
id: { type: 'string' },
auto_poweron: { type: 'boolean', optional: true },
name_label: { type: 'string', optional: true },
name_description: { type: 'string', optional: true },
@@ -610,7 +598,7 @@ set.params = {
// Switch from Cirrus video adaptor to VGA adaptor
vga: { type: 'string', optional: true },
videoram: { type: 'number', optional: true },
videoram: { type: ['string', 'number'], optional: true },
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
@@ -628,9 +616,6 @@ set.params = {
// set the VM network interface controller
nicType: { type: ['string', 'null'], optional: true },
// set the VM boot firmware mode
hvmBootFirmware: { type: ['string', 'null'], optional: true },
}
set.resolve = {
@@ -640,7 +625,13 @@ set.resolve = {
// -------------------------------------------------------------------
export async function restart({ vm, force = false }) {
return this.getXapi(vm).rebootVm(vm._xapiId, { hard: force })
const xapi = this.getXapi(vm)
if (force) {
await xapi.call('VM.hard_reboot', vm._xapiRef)
} else {
await xapi.call('VM.clean_reboot', vm._xapiRef)
}
}
restart.params = {
@@ -741,7 +732,7 @@ export async function convertToTemplate({ vm }) {
// Convert to a template requires pool admin permission.
await this.checkPermissions(this.user.id, [[vm.$pool, 'administrate']])
await this.getXapiObject(vm).set_is_a_template(true)
await this.getXapi(vm).call('VM.set_is_a_template', vm._xapiRef, true)
}
convertToTemplate.params = {
@@ -1093,7 +1084,7 @@ stop.resolve = {
// -------------------------------------------------------------------
export async function suspend({ vm }) {
await this.getXapi(vm).callAsync('VM.suspend', vm._xapiRef)
await this.getXapi(vm).call('VM.suspend', vm._xapiRef)
}
suspend.params = {
@@ -1107,7 +1098,7 @@ suspend.resolve = {
// -------------------------------------------------------------------
export async function pause({ vm }) {
await this.getXapi(vm).callAsync('VM.pause', vm._xapiRef)
await this.getXapi(vm).call('VM.pause', vm._xapiRef)
}
pause.params = {
@@ -1207,7 +1198,7 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
res.end(format.response(0, vm.$id))
} catch (e) {
res.writeHead(500)
res.end(format.error(0, new JsonRpcError(e.message)))
res.end(format.error(0, new Error(e.message)))
}
}
@@ -1370,7 +1361,9 @@ createInterface.resolve = {
// -------------------------------------------------------------------
export async function attachPci({ vm, pciId }) {
await this.getXapiObject(vm).update_other_config('pci', pciId)
const xapi = this.getXapi(vm)
await xapi.call('VM.add_to_other_config', vm._xapiRef, 'pci', pciId)
}
attachPci.params = {
@@ -1385,7 +1378,9 @@ attachPci.resolve = {
// -------------------------------------------------------------------
export async function detachPci({ vm }) {
await this.getXapiObject(vm).update_other_config('pci', null)
const xapi = this.getXapi(vm)
await xapi.call('VM.remove_from_other_config', vm._xapiRef, 'pci')
}
detachPci.params = {
@@ -1418,11 +1413,15 @@ stats.resolve = {
// -------------------------------------------------------------------
export async function setBootOrder({ vm, order }) {
if (vm.virtualizationMode !== 'hvm') {
throw invalidParameters('You can only set the boot order on a HVM guest')
const xapi = this.getXapi(vm)
order = { order }
if (vm.virtualizationMode === 'hvm') {
await xapi.call('VM.set_HVM_boot_params', vm._xapiRef, order)
return
}
await this.getXapiObject(vm).update_HVM_boot_params('order', order)
throw invalidParameters('You can only set the boot order on a HVM guest')
}
setBootOrder.params = {

View File

@@ -55,7 +55,6 @@ getAllObjects.description = 'Returns all XO objects'
getAllObjects.params = {
filter: { type: 'object', optional: true },
limit: { type: 'number', optional: true },
ndjson: { type: 'boolean', optional: true },
}
// -------------------------------------------------------------------

View File

@@ -269,10 +269,10 @@ export async function fixHostNotInNetwork({ xosanSr, host }) {
if (pif) {
const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
reconfigurePifIP(xapi, pif, newIP)
await xapi.callAsync('PIF.plug', pif.$ref)
await xapi.call('PIF.plug', pif.$ref)
const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
if (PBD) {
await xapi.callAsync('PBD.plug', PBD.$ref)
await xapi.call('PBD.plug', PBD.$ref)
}
const sshKey = await getOrCreateSshKey(xapi)
await callPlugin(xapi, host, 'receive_ssh_keys', {
@@ -809,7 +809,7 @@ export const createSR = defer(async function(
})
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 6 }
log.debug('scanning new SR')
await xapi.callAsync('SR.scan', xosanSrRef)
await xapi.call('SR.scan', xosanSrRef)
await this.rebindLicense({
licenseId: license.id,
oldBoundObjectId: tmpBoundObjectId,
@@ -884,13 +884,13 @@ async function createVDIOnLVMWithoutSizeLimit(xapi, lvmSr, diskSize) {
if (result.exit !== 0) {
throw Error('Could not create volume ->' + result.stdout)
}
await xapi.callAsync('SR.scan', xapi.getObject(lvmSr).$ref)
await xapi.call('SR.scan', xapi.getObject(lvmSr).$ref)
const vdi = find(xapi.getObject(lvmSr).$VDIs, vdi => vdi.uuid === uuid)
if (vdi != null) {
await Promise.all([
vdi.set_name_description('Created by XO'),
vdi.set_name_label('xosan_data'),
])
await xapi.setSrProperties(vdi.$ref, {
nameLabel: 'xosan_data',
nameDescription: 'Created by XO',
})
return vdi
}
}
@@ -989,7 +989,7 @@ async function replaceBrickOnSameVM(
await xapi.disconnectVbd(previousVBD)
await xapi.deleteVdi(previousVBD.VDI)
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 4 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1068,7 +1068,7 @@ export async function replaceBrick({
await xapi.deleteVm(previousVMEntry.vm, true)
}
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 3 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1115,7 +1115,7 @@ async function _prepareGlusterVm(
const firstVif = newVM.$VIFs[0]
if (xosanNetwork.$id !== firstVif.$network.$id) {
try {
await xapi.callAsync('VIF.move', firstVif.$ref, xosanNetwork.$ref)
await xapi.call('VIF.move', firstVif.$ref, xosanNetwork.$ref)
} catch (error) {
if (error.code === 'MESSAGE_METHOD_UNKNOWN') {
// VIF.move has been introduced in xenserver 7.0
@@ -1124,7 +1124,7 @@ async function _prepareGlusterVm(
}
}
}
await newVM.add_tags('XOSAN')
await xapi.addTag(newVM.$id, 'XOSAN')
await xapi.editVm(newVM, {
name_label: `XOSAN - ${lvmSr.name_label} - ${
host.name_label
@@ -1132,7 +1132,7 @@ async function _prepareGlusterVm(
name_description: 'Xosan VM storage',
memory: memorySize,
})
await newVM.set_xenstore_data(xenstoreData)
await xapi.call('VM.set_xenstore_data', newVM.$ref, xenstoreData)
const rootDisk = newVM.$VBDs
.map(vbd => vbd && vbd.$VDI)
.find(vdi => vdi && vdi.name_label === 'xosan_root')
@@ -1330,7 +1330,7 @@ export const addBricks = defer(async function(
data.nodes = data.nodes.concat(newNodes)
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 2 }
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
@@ -1382,7 +1382,7 @@ export const removeBricks = defer(async function($defer, { xosansr, bricks }) {
)
remove(data.nodes, node => ips.includes(node.vm.ip))
await xapi.xo.setData(xosansr.id, 'xosan_config', data)
await xapi.callAsync('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
await xapi.call('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
} finally {
delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
@@ -1542,10 +1542,9 @@ export async function downloadAndInstallXosanPack({ id, version, pool }) {
const res = await this.requestResource('xosan', id, version)
await xapi.installSupplementalPackOnAllHosts(res)
await xapi.pool.update_other_config(
'xosan_pack_installation_time',
String(Math.floor(Date.now() / 1e3))
)
await xapi._updateObjectMapProperty(xapi.pool, 'other_config', {
xosan_pack_installation_time: String(Math.floor(Date.now() / 1e3)),
})
}
downloadAndInstallXosanPack.description = 'Register a resource via cloud plugin'

View File

@@ -93,7 +93,7 @@ async function loadConfiguration() {
function createExpressApp(config) {
const app = createExpress()
app.use(helmet(config.http.helmet))
app.use(helmet())
app.use(compression())
@@ -417,7 +417,6 @@ const setUpProxies = (express, opts, xo) => {
}
const proxy = createProxyServer({
changeOrigin: true,
ignorePath: true,
}).on('error', error => console.error(error))

View File

@@ -2,8 +2,6 @@ import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'
import { parseProp } from './utils'
// ===================================================================
export default class Remote extends Model {}
@@ -16,21 +14,12 @@ export class Remotes extends Collection {
async get(properties) {
const remotes = await super.get(properties)
forEach(remotes, remote => {
remote.benchmarks = parseProp('remote', remote, 'benchmarks')
remote.benchmarks =
remote.benchmarks !== undefined
? JSON.parse(remote.benchmarks)
: undefined
remote.enabled = remote.enabled === 'true'
})
return remotes
}
_update(remotes) {
return super._update(
remotes.map(remote => {
const { benchmarks } = remote
if (benchmarks !== undefined) {
remote.benchmarks = JSON.stringify(benchmarks)
}
return remote
})
)
}
}

View File

@@ -569,16 +569,6 @@ const TRANSFORMS = {
MAC: obj.MAC,
MTU: +obj.MTU,
// in kB/s
rateLimit: (() => {
if (obj.qos_algorithm_type === 'ratelimit') {
const { kbps } = obj.qos_algorithm_params
if (kbps !== undefined) {
return +kbps
}
}
})(),
$network: link(obj, 'network'),
$VM: link(obj, 'VM'),
}
@@ -643,7 +633,7 @@ const TRANSFORMS = {
description: poolPatch.name_description,
name: poolPatch.name_label,
pool_patch: poolPatch.$ref,
size: +poolPatch.size,
size: poolPatch.size,
guidance: poolPatch.after_apply_guidance,
time: toTimestamp(obj.timestamp_applied),

View File

@@ -1,4 +1,3 @@
/* eslint eslint-comments/disable-enable-pair: [error, {allowWholeFile: true}] */
/* eslint-disable camelcase */
import asyncMap from '@xen-orchestra/async-map'
import concurrency from 'limit-concurrency-decorator'
@@ -22,14 +21,13 @@ import { forbiddenOperation } from 'xo-common/api-errors'
import { Xapi as XapiBase, NULL_REF } from 'xen-api'
import {
every,
filter,
find,
filter,
flatMap,
flatten,
groupBy,
includes,
isEmpty,
noop,
omit,
startsWith,
uniq,
@@ -229,6 +227,14 @@ export default class Xapi extends XapiBase {
// =================================================================
_setObjectProperty(object, name, value) {
return this.call(
`${object.$type}.set_${camelToSnakeCase(name)}`,
object.$ref,
prepareXapiParam(value)
)
}
_setObjectProperties(object, props) {
const { $ref: ref, $type: type } = object
@@ -247,10 +253,101 @@ export default class Xapi extends XapiBase {
)::ignoreErrors()
}
async _updateObjectMapProperty(object, prop, values) {
const { $ref: ref, $type: type } = object
prop = camelToSnakeCase(prop)
const add = `${type}.add_to_${prop}`
const remove = `${type}.remove_from_${prop}`
await Promise.all(
mapToArray(values, (value, name) => {
if (value !== undefined) {
name = camelToSnakeCase(name)
const removal = this.call(remove, ref, name)
return value === null
? removal
: removal
::ignoreErrors()
.then(() => this.call(add, ref, name, prepareXapiParam(value)))
}
})
)
}
async setHostProperties(id, { nameLabel, nameDescription }) {
await this._setObjectProperties(this.getObject(id), {
nameLabel,
nameDescription,
})
}
async setPoolProperties({ autoPoweron, nameLabel, nameDescription }) {
const { pool } = this
await Promise.all([
this._setObjectProperties(pool, {
nameLabel,
nameDescription,
}),
autoPoweron != null &&
this._updateObjectMapProperty(pool, 'other_config', {
autoPoweron: autoPoweron ? 'true' : null,
}),
])
}
async setSrProperties(id, { nameLabel, nameDescription }) {
await this._setObjectProperties(this.getObject(id), {
nameLabel,
nameDescription,
})
}
async setNetworkProperties(
id,
{ automatic, defaultIsLocked, nameDescription, nameLabel }
) {
let defaultLockingMode
if (defaultIsLocked != null) {
defaultLockingMode = defaultIsLocked ? 'disabled' : 'unlocked'
}
const network = this.getObject(id)
await Promise.all([
this._setObjectProperties(network, {
defaultLockingMode,
nameDescription,
nameLabel,
}),
this._updateObjectMapProperty(network, 'other_config', {
automatic:
automatic === undefined ? undefined : automatic ? 'true' : null,
}),
])
}
// =================================================================
setDefaultSr(srId) {
return this.pool.set_default_SR(this.getObject(srId).$ref)
async addTag(id, tag) {
const { $ref: ref, $type: type } = this.getObject(id)
await this.call(`${type}.add_tags`, ref, tag)
}
async removeTag(id, tag) {
const { $ref: ref, $type: type } = this.getObject(id)
await this.call(`${type}.remove_tags`, ref, tag)
}
// =================================================================
async setDefaultSr(srId) {
this._setObjectProperties(this.pool, {
default_SR: this.getObject(srId).$ref,
})
}
// =================================================================
@@ -279,12 +376,12 @@ export default class Xapi extends XapiBase {
await pSettle(
mapToArray(vms, vm => {
if (!vm.is_control_domain) {
return this.callAsync('VM.suspend', vm.$ref)
return this.call('VM.suspend', vm.$ref)
}
})
)
await this.call('host.disable', host.$ref)
await this.callAsync('host.shutdown', host.$ref)
await this.call('host.shutdown', host.$ref)
}
// =================================================================
@@ -297,7 +394,7 @@ export default class Xapi extends XapiBase {
await this.call('host.disable', ref)
try {
await this.callAsync('host.evacuate', ref)
await this.call('host.evacuate', ref)
} catch (error) {
if (!force) {
await this.call('host.enable', ref)
@@ -312,7 +409,7 @@ export default class Xapi extends XapiBase {
}
async forgetHost(hostId) {
await this.callAsync('host.destroy', this.getObject(hostId).$ref)
await this.call('host.destroy', this.getObject(hostId).$ref)
}
async ejectHostFromPool(hostId) {
@@ -347,7 +444,9 @@ export default class Xapi extends XapiBase {
$defer(() => this.plugPbd(ref))
})
return host.update_other_config(
return this._updateObjectMapProperty(
host,
'other_config',
multipathing
? {
multipathing: 'true',
@@ -360,23 +459,23 @@ export default class Xapi extends XapiBase {
}
async powerOnHost(hostId) {
await this.callAsync('host.power_on', this.getObject(hostId).$ref)
await this.call('host.power_on', this.getObject(hostId).$ref)
}
async rebootHost(hostId, force = false) {
const host = this.getObject(hostId)
await this._clearHost(host, force)
await this.callAsync('host.reboot', host.$ref)
await this.call('host.reboot', host.$ref)
}
async restartHostAgent(hostId) {
await this.callAsync('host.restart_agent', this.getObject(hostId).$ref)
await this.call('host.restart_agent', this.getObject(hostId).$ref)
}
async setRemoteSyslogHost(hostId, syslogDestination) {
const host = this.getObject(hostId)
await host.set_logging({
await this.call('host.set_logging', host.$ref, {
syslog_destination: syslogDestination,
})
await this.call('host.syslog_reconfigure', host.$ref)
@@ -386,7 +485,7 @@ export default class Xapi extends XapiBase {
const host = this.getObject(hostId)
await this._clearHost(host, force)
await this.callAsync('host.shutdown', host.$ref)
await this.call('host.shutdown', host.$ref)
}
// =================================================================
@@ -400,7 +499,7 @@ export default class Xapi extends XapiBase {
}`
)
return this.callAsync('VM.clone', vm.$ref, nameLabel).then(extractOpaqueRef)
return this.call('VM.clone', vm.$ref, nameLabel)
}
// Copy a VM: make a normal copy of a VM and all its VDIs.
@@ -471,7 +570,12 @@ export default class Xapi extends XapiBase {
stream = stream.pipe(sizeStream)
const onVmCreation =
nameLabel !== undefined ? vm => vm.set_name_label(nameLabel) : null
nameLabel !== undefined
? vm =>
targetXapi._setObjectProperties(vm, {
nameLabel,
})
: null
const vm = await targetXapi._getOrWaitObject(
await targetXapi._importVm(stream, sr, onVmCreation)
@@ -611,13 +715,17 @@ export default class Xapi extends XapiBase {
// It is necessary for suspended VMs to be shut down
// to be able to delete their VDIs.
if (vm.power_state !== 'Halted') {
await this.callAsync('VM.hard_shutdown', $ref)
await this.call('VM.hard_shutdown', $ref)
}
await Promise.all([
vm.set_is_a_template(false),
vm.update_blocked_operations('destroy', null),
vm.update_other_config('default_template', null),
this.call('VM.set_is_a_template', vm.$ref, false),
this._updateObjectMapProperty(vm, 'blocked_operations', {
destroy: null,
}),
this._updateObjectMapProperty(vm, 'other_config', {
default_template: null,
}),
])
// must be done before destroying the VM
@@ -625,7 +733,7 @@ export default class Xapi extends XapiBase {
// this cannot be done in parallel, otherwise disks and snapshots will be
// destroyed even if this fails
await this.callAsync('VM.destroy', $ref)
await this.call('VM.destroy', $ref)
return Promise.all([
asyncMap(vm.$snapshots, snapshot =>
@@ -956,21 +1064,23 @@ export default class Xapi extends XapiBase {
await this._createVmRecord({
...delta.vm,
affinity: null,
blocked_operations: {
...delta.vm.blocked_operations,
start: 'Importing…',
},
ha_always_run: false,
is_a_template: false,
name_label: `[Importing…] ${name_label}`,
other_config: {
...delta.vm.other_config,
[TAG_COPY_SRC]: delta.vm.uuid,
},
})
)
$defer.onFailure(() => this._deleteVm(vm))
await Promise.all([
this._setObjectProperties(vm, {
name_label: `[Importing…] ${name_label}`,
}),
this._updateObjectMapProperty(vm, 'blocked_operations', {
start: 'Importing…',
}),
this._updateObjectMapProperty(vm, 'other_config', {
[TAG_COPY_SRC]: delta.vm.uuid,
}),
])
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
@@ -992,7 +1102,9 @@ export default class Xapi extends XapiBase {
newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
$defer.onFailure(() => this._deleteVdi(newVdi.$ref))
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
await this._updateObjectMapProperty(newVdi, 'other_config', {
[TAG_COPY_SRC]: vdi.uuid,
})
} else {
newVdi = await this.createVdi({
...vdi,
@@ -1087,15 +1199,15 @@ export default class Xapi extends XapiBase {
}
await Promise.all([
delta.vm.ha_always_run && vm.set_ha_always_run(true),
vm.set_name_label(name_label),
this._setObjectProperties(vm, {
name_label,
}),
// FIXME: move
vm.update_blocked_operations(
'start',
disableStartAfterImport
this._updateObjectMapProperty(vm, 'blocked_operations', {
start: disableStartAfterImport
? 'Do not start this VM, clone it if you want to use it.'
: null
),
: null,
}),
])
return { transferSize, vm }
@@ -1149,7 +1261,7 @@ export default class Xapi extends XapiBase {
)
const loop = () =>
this.callAsync(
this.call(
'VM.migrate_send',
vm.$ref,
token,
@@ -1163,7 +1275,7 @@ export default class Xapi extends XapiBase {
pDelay(1e4).then(loop)
)
return loop().then(noop)
return loop()
}
@synchronized()
@@ -1318,8 +1430,14 @@ export default class Xapi extends XapiBase {
$defer.onFailure(() => this._deleteVm(vm))
// Disable start and change the VM name label during import.
await Promise.all([
vm.update_blocked_operations('start', 'OVA import in progress...'),
vm.set_name_label(`[Importing...] ${nameLabel}`),
this.addForbiddenOperationToVm(
vm.$id,
'start',
'OVA import in progress...'
),
this._setObjectProperties(vm, {
name_label: `[Importing...] ${nameLabel}`,
}),
])
// 2. Create VDIs & Vifs.
@@ -1336,7 +1454,7 @@ export default class Xapi extends XapiBase {
$defer.onFailure(() => this._deleteVdi(vdi.$ref))
return this.createVbd({
userdevice: String(disk.position),
userdevice: disk.position,
vdi,
vm,
})
@@ -1379,8 +1497,8 @@ export default class Xapi extends XapiBase {
// Enable start and restore the VM name label after import.
await Promise.all([
vm.update_blocked_operations('start', null),
vm.set_name_label(nameLabel),
this.removeForbiddenOperationFromVm(vm.$id, 'start'),
this._setObjectProperties(vm, { name_label: nameLabel }),
])
return vm
}
@@ -1427,7 +1545,7 @@ export default class Xapi extends XapiBase {
})
} else {
try {
await this.callAsync('VM.pool_migrate', vm.$ref, host.$ref, {
await this.call('VM.pool_migrate', vm.$ref, host.$ref, {
force: 'true',
})
} catch (error) {
@@ -1512,11 +1630,19 @@ export default class Xapi extends XapiBase {
return /* await */ this._snapshotVm(this.getObject(vmId), nameLabel)
}
async setVcpuWeight(vmId, weight) {
weight = weight || null // Take all falsy values as a removal (0 included)
const vm = this.getObject(vmId)
await this._updateObjectMapProperty(vm, 'VCPUs_params', { weight })
}
async _startVm(vm, host, force) {
log.debug(`Starting VM ${vm.name_label}`)
if (force) {
await vm.update_blocked_operations('start', null)
await this._updateObjectMapProperty(vm, 'blocked_operations', {
start: null,
})
}
return host === undefined
@@ -1526,7 +1652,7 @@ export default class Xapi extends XapiBase {
false, // Start paused?
false // Skip pre-boot checks?
)
: this.callAsync('VM.start_on', vm.$ref, host.$ref, false, false)
: this.call('VM.start_on', vm.$ref, host.$ref, false, false)
}
async startVm(vmId, hostId, force) {
@@ -1555,12 +1681,16 @@ export default class Xapi extends XapiBase {
if (isVmHvm(vm)) {
const { order } = vm.HVM_boot_params
await vm.update_HVM_boot_params('order', 'd')
await this._updateObjectMapProperty(vm, 'HVM_boot_params', {
order: 'd',
})
try {
await this._startVm(vm)
} finally {
await vm.update_HVM_boot_params('order', order)
await this._updateObjectMapProperty(vm, 'HVM_boot_params', {
order,
})
}
} else {
// Find the original template by name (*sigh*).
@@ -1582,14 +1712,20 @@ export default class Xapi extends XapiBase {
const cdDrive = this._getVmCdDrive(vm)
forEach(vm.$VBDs, vbd => {
promises.push(vbd.set_bootable(vbd === cdDrive))
promises.push(
this._setObjectProperties(vbd, {
bootable: vbd === cdDrive,
})
)
bootables.push([vbd, Boolean(vbd.bootable)])
})
promises.push(
vm.set_PV_bootloader('eliloader'),
vm.update_other_config({
this._setObjectProperties(vm, {
PV_bootloader: 'eliloader',
}),
this._updateObjectMapProperty(vm, 'other_config', {
'install-distro':
template && template.other_config['install-distro'],
'install-repository': 'cdrom',
@@ -1600,15 +1736,35 @@ export default class Xapi extends XapiBase {
await this._startVm(vm)
} finally {
vm.set_PV_bootloader(bootloader)::ignoreErrors()
this._setObjectProperties(vm, {
PV_bootloader: bootloader,
})::ignoreErrors()
forEach(bootables, ([vbd, bootable]) => {
vbd.set_bootable(bootable)::ignoreErrors()
this._setObjectProperties(vbd, { bootable })::ignoreErrors()
})
}
}
}
// vm_operations: http://xapi-project.github.io/xen-api/classes/vm.html
async addForbiddenOperationToVm(vmId, operation, reason) {
await this.call(
'VM.add_to_blocked_operations',
this.getObject(vmId).$ref,
operation,
`[XO] ${reason}`
)
}
async removeForbiddenOperationFromVm(vmId, operation) {
await this.call(
'VM.remove_from_blocked_operations',
this.getObject(vmId).$ref,
operation
)
}
// =================================================================
async createVbd({
@@ -1669,14 +1825,14 @@ export default class Xapi extends XapiBase {
})
if (isVmRunning(vm)) {
await this.callAsync('VBD.plug', vbdRef)
await this.call('VBD.plug', vbdRef)
}
}
_cloneVdi(vdi) {
log.debug(`Cloning VDI ${vdi.name_label}`)
return this.callAsync('VDI.clone', vdi.$ref).then(extractOpaqueRef)
return this.call('VDI.clone', vdi.$ref)
}
async createVdi({
@@ -1699,7 +1855,7 @@ export default class Xapi extends XapiBase {
log.debug(`Creating VDI ${name_label} on ${sr.name_label}`)
return this._getOrWaitObject(
await this.callAsync('VDI.create', {
await this.call('VDI.create', {
name_description,
name_label,
other_config,
@@ -1711,7 +1867,7 @@ export default class Xapi extends XapiBase {
type,
virtual_size: size !== undefined ? parseSize(size) : virtual_size,
xenstore_data,
}).then(extractOpaqueRef)
})
)
}
@@ -1729,12 +1885,9 @@ export default class Xapi extends XapiBase {
}`
)
try {
await pRetry(
() => this.callAsync('VDI.pool_migrate', vdi.$ref, sr.$ref, {}),
{
when: { code: 'TOO_MANY_STORAGE_MIGRATES' },
}
)
await pRetry(() => this.call('VDI.pool_migrate', vdi.$ref, sr.$ref, {}), {
when: { code: 'TOO_MANY_STORAGE_MIGRATES' },
})
} catch (error) {
const { code } = error
if (
@@ -1745,9 +1898,7 @@ export default class Xapi extends XapiBase {
throw error
}
const newVdi = await this.barrier(
await this.callAsync('VDI.copy', vdi.$ref, sr.$ref).then(
extractOpaqueRef
)
await this.call('VDI.copy', vdi.$ref, sr.$ref)
)
await asyncMap(vdi.$VBDs, async vbd => {
await this.call('VBD.destroy', vbd.$ref)
@@ -1765,7 +1916,7 @@ export default class Xapi extends XapiBase {
log.debug(`Deleting VDI ${vdiRef}`)
try {
await this.callAsync('VDI.destroy', vdiRef)
await this.call('VDI.destroy', vdiRef)
} catch (error) {
if (error?.code !== 'HANDLE_INVALID') {
throw error
@@ -1778,7 +1929,7 @@ export default class Xapi extends XapiBase {
`Resizing VDI ${vdi.name_label} from ${vdi.virtual_size} to ${size}`
)
return this.callAsync('VDI.resize', vdi.$ref, size)
return this.call('VDI.resize', vdi.$ref, size)
}
_getVmCdDrive(vm) {
@@ -1792,7 +1943,7 @@ export default class Xapi extends XapiBase {
async _ejectCdFromVm(vm) {
const cdDrive = this._getVmCdDrive(vm)
if (cdDrive) {
await this.callAsync('VBD.eject', cdDrive.$ref)
await this.call('VBD.eject', cdDrive.$ref)
}
}
@@ -1800,20 +1951,20 @@ export default class Xapi extends XapiBase {
const cdDrive = await this._getVmCdDrive(vm)
if (cdDrive) {
try {
await this.callAsync('VBD.insert', cdDrive.$ref, cd.$ref)
await this.call('VBD.insert', cdDrive.$ref, cd.$ref)
} catch (error) {
if (!force || error.code !== 'VBD_NOT_EMPTY') {
throw error
}
await this.callAsync('VBD.eject', cdDrive.$ref)::ignoreErrors()
await this.call('VBD.eject', cdDrive.$ref)::ignoreErrors()
// Retry.
await this.callAsync('VBD.insert', cdDrive.$ref, cd.$ref)
await this.call('VBD.insert', cdDrive.$ref, cd.$ref)
}
if (bootable !== Boolean(cdDrive.bootable)) {
await cdDrive.set_bootable(bootable)
await this._setObjectProperties(cdDrive, { bootable })
}
} else {
await this.createVbd({
@@ -1826,7 +1977,7 @@ export default class Xapi extends XapiBase {
}
async connectVbd(vbdId) {
await this.callAsync('VBD.plug', vbdId)
await this.call('VBD.plug', vbdId)
}
async _disconnectVbd(vbd) {
@@ -1835,7 +1986,7 @@ export default class Xapi extends XapiBase {
await this.call('VBD.unplug_force', vbd.$ref)
} catch (error) {
if (error.code === 'VBD_NOT_UNPLUGGABLE') {
await vbd.set_unpluggable(true)
await this.call('VBD.set_unpluggable', vbd.$ref, true)
return this.call('VBD.unplug_force', vbd.$ref)
}
}
@@ -1886,11 +2037,11 @@ export default class Xapi extends XapiBase {
const vdi = this.getObject(vdiId)
const snap = await this._getOrWaitObject(
await this.callAsync('VDI.snapshot', vdi.$ref).then(extractOpaqueRef)
await this.call('VDI.snapshot', vdi.$ref)
)
if (nameLabel) {
await snap.set_name_label(nameLabel)
await this.call('VDI.set_name_label', snap.$ref, nameLabel)
}
return snap
@@ -2014,7 +2165,7 @@ export default class Xapi extends XapiBase {
)
if (currently_attached && isVmRunning(vm)) {
await this.callAsync('VIF.plug', vifRef)
await this.call('VIF.plug', vifRef)
}
return vifRef
@@ -2042,7 +2193,7 @@ export default class Xapi extends XapiBase {
// https://citrix.github.io/xenserver-sdk/#network
other_config: { automatic: 'false' },
})
$defer.onFailure(() => this.callAsync('network.destroy', networkRef))
$defer.onFailure(() => this.call('network.destroy', networkRef))
if (pifId) {
await this.call(
'pool.create_VLAN_from_PIF',
@@ -2081,7 +2232,7 @@ export default class Xapi extends XapiBase {
await Promise.all(
mapToArray(
vlans,
vlan => vlan !== NULL_REF && this.callAsync('VLAN.destroy', vlan)
vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan)
)
)
@@ -2096,7 +2247,7 @@ export default class Xapi extends XapiBase {
newPifs,
pifRef =>
!wasAttached[this.getObject(pifRef).host] &&
this.callAsync('PIF.unplug', pifRef)::ignoreErrors()
this.call('PIF.unplug', pifRef)::ignoreErrors()
)
)
}
@@ -2127,7 +2278,7 @@ export default class Xapi extends XapiBase {
await Promise.all(
mapToArray(
vlans,
vlan => vlan !== NULL_REF && this.callAsync('VLAN.destroy', vlan)
vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan)
)
)
@@ -2136,17 +2287,7 @@ export default class Xapi extends XapiBase {
mapToArray(bonds, bond => this.call('Bond.destroy', bond))
)
const tunnels = filter(this.objects.all, { $type: 'tunnel' })
await Promise.all(
map(pifs, async pif => {
const tunnel = find(tunnels, { access_PIF: pif.$ref })
if (tunnel != null) {
await this.callAsync('tunnel.destroy', tunnel.$ref)
}
})
)
await this.callAsync('network.destroy', network.$ref)
await this.call('network.destroy', network.$ref)
}
// =================================================================
@@ -2346,7 +2487,7 @@ export default class Xapi extends XapiBase {
)
}
async assertConsistentHostServerTime(hostRef) {
async _assertConsistentHostServerTime(hostRef) {
const delta =
parseDateTime(await this.call('host.get_servertime', hostRef)).getTime() -
Date.now()
@@ -2358,27 +2499,4 @@ export default class Xapi extends XapiBase {
)
}
}
async isHyperThreadingEnabled(hostId) {
try {
return (
(await this.call(
'host.call_plugin',
this.getObject(hostId).$ref,
'hyperthreading.py',
'get_hyperthreading',
{}
)) !== 'false'
)
} catch (error) {
if (
error.code === 'XENAPI_MISSING_PLUGIN' ||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
) {
return null
} else {
throw error
}
}
}
}

View File

@@ -68,12 +68,18 @@ declare export class Xapi {
sr?: XapiObject,
onVmCreation?: (XapiObject) => any
): Promise<string>;
_updateObjectMapProperty(
object: XapiObject,
property: string,
entries: $Dict<null | string>
): Promise<void>;
_setObjectProperties(
object: XapiObject,
properties: $Dict<mixed>
): Promise<void>;
_snapshotVm(cancelToken: mixed, vm: Vm, nameLabel?: string): Promise<Vm>;
addTag(object: Id, tag: string): Promise<void>;
barrier(): Promise<void>;
barrier(ref: string): Promise<XapiObject>;
deleteVm(vm: Id): Promise<void>;

View File

@@ -4,13 +4,13 @@ import { makeEditObject } from '../utils'
export default {
async _connectVif(vif) {
await this.callAsync('VIF.plug', vif.$ref)
await this.call('VIF.plug', vif.$ref)
},
async connectVif(vifId) {
await this._connectVif(this.getObject(vifId))
},
async _deleteVif(vif) {
await this.callAsync('VIF.destroy', vif.$ref)
await this.call('VIF.destroy', vif.$ref)
},
async deleteVif(vifId) {
const vif = this.getObject(vifId)
@@ -20,7 +20,7 @@ export default {
await this._deleteVif(vif)
},
async _disconnectVif(vif) {
await this.callAsync('VIF.unplug_force', vif.$ref)
await this.call('VIF.unplug_force', vif.$ref)
},
async disconnectVif(vifId) {
await this._disconnectVif(this.getObject(vifId))
@@ -37,7 +37,7 @@ export default {
: 'locked'
if (lockingMode !== vif.locking_mode) {
return vif.set_locking_mode(lockingMode)
return this._set('locking_mode', lockingMode)
}
},
],
@@ -53,36 +53,10 @@ export default {
: 'locked'
if (lockingMode !== vif.locking_mode) {
return vif.set_locking_mode(lockingMode)
return this._set('locking_mode', lockingMode)
}
},
],
},
// in kB/s
rateLimit: {
get: vif => {
if (vif.qos_algorithm_type === 'ratelimit') {
const { kbps } = vif.qos_algorithm_params
if (kbps !== undefined) {
return +kbps
}
}
// null is value used to remove the existing value
//
// we need to match this, to allow avoiding the `set` if the value is
// already missing.
return null
},
set: (value, vif) =>
Promise.all([
vif.set_qos_algorithm_type(value === null ? '' : 'ratelimit'),
vif.update_qos_algorithm_params(
'kbps',
value === null ? null : String(value)
),
]),
},
}),
}

View File

@@ -353,10 +353,9 @@ export default {
if (JSON.parse(update).exit !== 0) {
throw new Error('Update install failed')
} else {
await host.update_other_config(
'rpm_patch_installation_time',
String(Date.now() / 1000)
)
await this._updateObjectMapProperty(host, 'other_config', {
rpm_patch_installation_time: String(Date.now() / 1000),
})
}
})
},

View File

@@ -35,7 +35,7 @@ export default {
},
_plugPbd(pbd) {
return this.callAsync('PBD.plug', pbd.$ref)
return this.call('PBD.plug', pbd.$ref)
},
async plugPbd(id) {
@@ -43,7 +43,7 @@ export default {
},
_unplugPbd(pbd) {
return this.callAsync('PBD.unplug', pbd.$ref)
return this.call('PBD.unplug', pbd.$ref)
},
async unplugPbd(id) {
@@ -84,32 +84,4 @@ export default {
})
return unhealthyVdis
},
async createSr({
hostRef,
content_type = 'user', // recommended by Citrix
device_config = {},
name_description = '',
name_label,
shared = false,
physical_size = 0,
sm_config = {},
type,
}) {
const srRef = await this.call(
'SR.create',
hostRef,
device_config,
physical_size,
name_label,
name_description,
type,
content_type,
shared,
sm_config
)
return (await this.barrier(srRef)).uuid
},
}

View File

@@ -52,7 +52,6 @@ export default {
coreOs = false,
cloudConfig = undefined,
networkConfig = undefined,
vgpuType = undefined,
gpuGroup = undefined,
@@ -94,7 +93,7 @@ export default {
// Creates the VDIs and executes the initial steps of the
// installation.
await this.callAsync('VM.provision', vmRef)
await this.call('VM.provision', vmRef)
let vm = await this._getOrWaitObject(vmRef)
@@ -107,12 +106,17 @@ export default {
if (isHvm) {
if (!isEmpty(vdis) || installMethod === 'network') {
const { order } = vm.HVM_boot_params
const { HVM_boot_params: bootParams } = vm
let order = bootParams.order
if (order) {
order = 'n' + order.replace('n', '')
} else {
order = 'ncd'
}
vm.update_HVM_boot_params(
'order',
order ? 'n' + order.replace('n', '') : 'ncd'
)
this._setObjectProperties(vm, {
HVM_boot_params: { ...bootParams, order },
})
}
} else {
// PV
@@ -120,12 +124,13 @@ export default {
if (installMethod === 'network') {
// TODO: normalize RHEL URL?
await vm.update_other_config(
'install-repository',
installRepository
)
await this._updateObjectMapProperty(vm, 'other_config', {
'install-repository': installRepository,
})
} else if (installMethod === 'cd') {
await vm.update_other_config('install-repository', 'cdrom')
await this._updateObjectMapProperty(vm, 'other_config', {
'install-repository': 'cdrom',
})
}
}
}
@@ -236,16 +241,10 @@ export default {
}
})
if (coreOs) {
await this.createCoreOsCloudInitConfigDrive(vm.$id, srRef, cloudConfig)
} else {
await this.createCloudInitConfigDrive(
vm.$id,
srRef,
cloudConfig,
networkConfig
)
}
const method = coreOs
? 'createCoreOsCloudInitConfigDrive'
: 'createCloudInitConfigDrive'
await this[method](vm.$id, srRef, cloudConfig)
}
// wait for the record with all the VBDs and VIFs
@@ -258,15 +257,25 @@ export default {
_editVm: makeEditObject({
affinityHost: {
get: 'affinity',
set: (value, vm) =>
vm.set_affinity(value ? this.getObject(value).$ref : NULL_REF),
set(value, vm) {
return this._setObjectProperty(
vm,
'affinity',
value ? this.getObject(value).$ref : NULL_REF
)
},
},
autoPoweron: {
set(value, vm) {
return Promise.all([
vm.update_other_config('auto_poweron', value ? 'true' : null),
value && vm.$pool.update_other_config('auto_poweron', 'true'),
this._updateObjectMapProperty(vm, 'other_config', {
autoPoweron: value ? 'true' : null,
}),
value &&
this.setPoolProperties({
autoPoweron: true,
}),
])
},
},
@@ -276,19 +285,23 @@ export default {
if (virtualizationMode !== 'pv' && virtualizationMode !== 'hvm') {
throw new Error(`The virtualization mode must be 'pv' or 'hvm'`)
}
return vm
.set_domain_type(virtualizationMode)
::pCatch({ code: 'MESSAGE_METHOD_UNKNOWN' }, () =>
vm.set_HVM_boot_policy(
return this._set('domain_type', virtualizationMode)::pCatch(
{ code: 'MESSAGE_METHOD_UNKNOWN' },
() =>
this._set(
'HVM_boot_policy',
virtualizationMode === 'hvm' ? 'Boot order' : ''
)
)
)
},
},
coresPerSocket: {
set: (coresPerSocket, vm) =>
vm.update_platform('cores-per-socket', String(coresPerSocket)),
set(coresPerSocket, vm) {
return this._updateObjectMapProperty(vm, 'platform', {
'cores-per-socket': coresPerSocket,
})
},
},
CPUs: 'cpus',
@@ -306,22 +319,26 @@ export default {
get: vm => +vm.VCPUs_at_startup,
set: [
'VCPUs_at_startup',
(value, vm) => isVmRunning(vm) && vm.set_VCPUs_number_live(value),
function(value, vm) {
return isVmRunning(vm) && this._set('VCPUs_number_live', value)
},
],
},
cpuCap: {
get: vm => vm.VCPUs_params.cap && +vm.VCPUs_params.cap,
set: (cap, vm) => vm.update_VCPUs_params('cap', String(cap)),
set(cap, vm) {
return this._updateObjectMapProperty(vm, 'VCPUs_params', { cap })
},
},
cpuMask: {
get: vm => vm.VCPUs_params.mask && vm.VCPUs_params.mask.split(','),
set: (cpuMask, vm) =>
vm.update_VCPUs_params(
'mask',
cpuMask == null ? cpuMask : cpuMask.join(',')
),
set(cpuMask, vm) {
return this._updateObjectMapProperty(vm, 'VCPUs_params', {
mask: cpuMask == null ? cpuMask : cpuMask.join(','),
})
},
},
cpusMax: 'cpusStaticMax',
@@ -335,15 +352,15 @@ export default {
cpuWeight: {
get: vm => vm.VCPUs_params.weight && +vm.VCPUs_params.weight,
set: (weight, vm) =>
vm.update_VCPUs_params(
'weight',
weight === null ? null : String(weight)
),
set(weight, vm) {
return this._updateObjectMapProperty(vm, 'VCPUs_params', { weight })
},
},
highAvailability: {
set: (ha, vm) => vm.set_ha_restart_priority(ha),
set(ha, vm) {
return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
},
},
memoryMin: {
@@ -415,12 +432,19 @@ export default {
hasVendorDevice: true,
expNestedHvm: {
set: (expNestedHvm, vm) =>
vm.update_platform('exp-nested-hvm', expNestedHvm ? 'true' : null),
set(expNestedHvm, vm) {
return this._updateObjectMapProperty(vm, 'platform', {
'exp-nested-hvm': expNestedHvm ? 'true' : null,
})
},
},
nicType: {
set: (nicType, vm) => vm.update_platform('nic_type', nicType),
set(nicType, vm) {
return this._updateObjectMapProperty(vm, 'platform', {
nic_type: nicType,
})
},
},
vga: {
@@ -430,7 +454,7 @@ export default {
`The different values that the VGA can take are: ${XEN_VGA_VALUES}`
)
}
return vm.update_platform('vga', vga)
return this._updateObjectMapProperty(vm, 'platform', { vga })
},
},
@@ -441,17 +465,15 @@ export default {
`The different values that the video RAM can take are: ${XEN_VIDEORAM_VALUES}`
)
}
return vm.update_platform('videoram', String(videoram))
return this._updateObjectMapProperty(vm, 'platform', { videoram })
},
},
startDelay: {
get: vm => +vm.start_delay,
set: (startDelay, vm) => vm.set_start_delay(startDelay),
},
hvmBootFirmware: {
set: (firmware, vm) => vm.update_HVM_boot_params('firmware', firmware),
set(startDelay, vm) {
return this.call('VM.set_start_delay', vm.$ref, startDelay)
},
},
}),
@@ -464,7 +486,7 @@ export default {
if (snapshotBefore) {
await this._snapshotVm(snapshot.$snapshot_of)
}
await this.callAsync('VM.revert', snapshot.$ref)
await this.call('VM.revert', snapshot.$ref)
if (snapshot.snapshot_info['power-state-at-snapshot'] === 'Running') {
const vm = await this.barrier(snapshot.snapshot_of)
if (vm.power_state === 'Halted') {
@@ -477,22 +499,15 @@ export default {
async resumeVm(vmId) {
// the force parameter is always true
await this.callAsync('VM.resume', this.getObject(vmId).$ref, false, true)
return this.call('VM.resume', this.getObject(vmId).$ref, false, true)
},
async unpauseVm(vmId) {
await this.callAsync('VM.unpause', this.getObject(vmId).$ref)
},
rebootVm(vmId, { hard = false } = {}) {
return this.callAsync(
`VM.${hard ? 'hard' : 'clean'}_reboot`,
this.getObject(vmId).$ref
).then(noop)
return this.call('VM.unpause', this.getObject(vmId).$ref)
},
shutdownVm(vmId, { hard = false } = {}) {
return this.callAsync(
return this.call(
`VM.${hard ? 'hard' : 'clean'}_shutdown`,
this.getObject(vmId).$ref
).then(noop)

View File

@@ -148,8 +148,8 @@ export const makeEditObject = specs => {
if (set === true) {
const prop = camelToSnakeCase(name)
return function(value, obj) {
return this.setField(obj.$type, obj.$ref, prop, value)
return function(value) {
return this._set(prop, value)
}
}
@@ -157,22 +157,16 @@ export const makeEditObject = specs => {
const index = set.indexOf('.')
if (index === -1) {
const prop = camelToSnakeCase(set)
return function(value, obj) {
return this.setField(obj.$type, obj.$ref, prop, value)
return function(value) {
return this._set(prop, value)
}
}
const field = set.slice(0, index)
const entry = set.slice(index + 1)
const map = set.slice(0, index)
const prop = set.slice(index + 1)
return function(value, object) {
return this.setFieldEntry(
object.$type,
object.$ref,
field,
entry,
value
)
return this._updateObjectMapProperty(object, map, { [prop]: value })
}
}
@@ -255,6 +249,16 @@ export const makeEditObject = specs => {
const limits = checkLimits && {}
const object = this.getObject(id)
const _objectRef = object.$ref
const _setMethodPrefix = `${object.$type}.set_`
// Context used to execute functions.
const context = {
__proto__: this,
_set: (prop, value) =>
this.call(_setMethodPrefix + prop, _objectRef, prepareXapiParam(value)),
}
const set = (value, name) => {
if (value === undefined) {
return
@@ -283,7 +287,7 @@ export const makeEditObject = specs => {
}
}
const cb = () => spec.set.call(this, value, object)
const cb = () => spec.set.call(context, value, object)
const { constraints } = spec
if (constraints) {

View File

@@ -60,9 +60,8 @@ function checkParams(method, params) {
const result = schemaInspector.validate(
{
properties: schema,
strict: true,
type: 'object',
properties: schema,
},
params
)
@@ -262,15 +261,11 @@ export default class Api {
//
// The goal here is to standardize the calls by always providing
// an id parameter when possible to simplify calls to the API.
if (params?.id === undefined) {
if (params != null && params.id === undefined) {
const namespace = name.slice(0, name.indexOf('.'))
const spec = method.params
if (spec !== undefined && 'id' in spec && !(namespace in spec)) {
const id = params[namespace]
if (typeof id === 'string') {
delete params[namespace]
params.id = id
}
const id = params[namespace]
if (typeof id === 'string') {
params.id = id
}
}

View File

@@ -1,5 +1,4 @@
import createLogger from '@xen-orchestra/log'
import { createPredicate } from 'value-matcher'
import { ignoreErrors } from 'promise-toolbox'
import { invalidCredentials, noSuchObject } from 'xo-common/api-errors'
@@ -194,14 +193,6 @@ export default class {
}
}
async deleteAuthenticationTokens({ filter }) {
return Promise.all(
(await this._tokens.get())
.filter(createPredicate(filter))
.map(({ id }) => this.deleteAuthenticationToken(id))
)
}
async getAuthenticationToken(id) {
let token = await this._tokens.first(id)
if (token === undefined) {

View File

@@ -1,8 +1,10 @@
import ms from 'ms'
import { forEach, isEmpty, iteratee, sortedIndexBy } from 'lodash'
import { noSuchObject } from 'xo-common/api-errors'
const isSkippedError = error =>
error.message === 'no disks found' ||
noSuchObject.is(error) ||
error.message === 'no VMs match this pattern' ||
error.message === 'unhealthy VDI chain'

View File

@@ -29,7 +29,6 @@ import {
ignoreErrors,
pFinally,
pFromEvent,
timeout,
} from 'promise-toolbox'
import Vhd, {
chainVhd,
@@ -42,7 +41,6 @@ import { type CallJob, type Executor, type Job } from '../jobs'
import { type Schedule } from '../scheduling'
import createSizeStream from '../../size-stream'
import parseDuration from '../../_parseDuration'
import {
type DeltaVmExport,
type DeltaVmImport,
@@ -288,7 +286,7 @@ const importers: $Dict<
xapi.importVm(xva, { srId: sr.$id })
)
await Promise.all([
vm.add_tags('restored from backup'),
xapi.addTag(vm.$id, 'restored from backup'),
xapi.editVm(vm.$id, {
name_label: `${metadata.vm.name_label} (${safeDateFormat(
metadata.timestamp
@@ -451,8 +449,10 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
}
return Promise.all([
vm.set_ha_restart_priority(''),
vm.add_tags('HA disabled'),
xapi._setObjectProperties(vm, {
haRestartPriority: '',
}),
xapi.addTag(vm.$ref, 'HA disabled'),
])
}
@@ -509,17 +509,9 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
// │ │ ├─ task.start(message: 'transfer')
// │ │ │ ├─ task.warning(message: string)
// │ │ │ └─ task.end(result: { size: number })
// │ │ │
// │ │ │ // in case of full backup, DR and CR
// │ │ ├─ task.start(message: 'clean')
// │ │ │ ├─ task.warning(message: string)
// │ │ │ └─ task.end
// │ │ │
// │ │ │ // in case of delta backup
// │ │ ├─ task.start(message: 'merge')
// │ │ │ ├─ task.warning(message: string)
// │ │ │ └─ task.end(result: { size: number })
// │ │ │
// │ │ └─ task.end
// │ └─ task.end
// └─ job.end
@@ -546,11 +538,10 @@ export default class BackupNg {
return this._runningRestores
}
constructor(app: any, { backup }) {
constructor(app: any) {
this._app = app
this._logger = undefined
this._runningRestores = new Set()
this._backupOptions = backup
app.on('start', async () => {
this._logger = await app.getLogger('restore')
@@ -946,7 +937,7 @@ export default class BackupNg {
message: 'clean backup metadata on VM',
parentId: taskId,
},
vm.update_other_config({
xapi._updateObjectMapProperty(vm, 'other_config', {
'xo:backup:datetime': null,
'xo:backup:deltaChainLength': null,
'xo:backup:exported': null,
@@ -1060,7 +1051,7 @@ export default class BackupNg {
message: 'add metadata to snapshot',
parentId: taskId,
},
snapshot.update_other_config({
xapi._updateObjectMapProperty(snapshot, 'other_config', {
'xo:backup:datetime': snapshot.snapshot_time,
'xo:backup:job': jobId,
'xo:backup:schedule': scheduleId,
@@ -1202,20 +1193,11 @@ export default class BackupNg {
)
): any)
const deleteOldBackups = () =>
wrapTask(
{
logger,
message: 'clean',
parentId: taskId,
},
this._deleteFullVmBackups(handler, oldBackups)
)
const deleteFirst = getSetting(settings, 'deleteFirst', [
remoteId,
])
if (deleteFirst) {
await deleteOldBackups()
await this._deleteFullVmBackups(handler, oldBackups)
}
await wrapTask(
@@ -1231,7 +1213,7 @@ export default class BackupNg {
await handler.outputFile(metadataFilename, jsonMetadata)
if (!deleteFirst) {
await deleteOldBackups()
await this._deleteFullVmBackups(handler, oldBackups)
}
}
)
@@ -1262,18 +1244,9 @@ export default class BackupNg {
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
)
const deleteOldBackups = () =>
wrapTask(
{
logger,
message: 'clean',
parentId: taskId,
},
this._deleteVms(xapi, oldVms)
)
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
if (deleteFirst) {
await deleteOldBackups()
await this._deleteVms(xapi, oldVms)
}
const vm = await xapi.barrier(
@@ -1285,27 +1258,29 @@ export default class BackupNg {
result: () => ({ size: xva.size }),
},
xapi._importVm($cancelToken, fork, sr, vm =>
vm.set_name_label(
`${metadata.vm.name_label} - ${
xapi._setObjectProperties(vm, {
nameLabel: `${metadata.vm.name_label} - ${
job.name
} - (${safeDateFormat(metadata.timestamp)})`
)
} - (${safeDateFormat(metadata.timestamp)})`,
})
)
)
)
await Promise.all([
vm.add_tags('Disaster Recovery'),
xapi.addTag(vm.$ref, 'Disaster Recovery'),
disableVmHighAvailability(xapi, vm),
vm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
vm.update_other_config('xo:backup:sr', srId),
xapi._updateObjectMapProperty(vm, 'blocked_operations', {
start:
'Start operation for this vm is blocked, clone it if you want to use it.',
}),
xapi._updateObjectMapProperty(vm, 'other_config', {
'xo:backup:sr': srId,
}),
])
if (!deleteFirst) {
await deleteOldBackups()
await this._deleteVms(xapi, oldVms)
}
}
)
@@ -1631,19 +1606,9 @@ export default class BackupNg {
listReplicatedVms(xapi, scheduleId, srId, vmUuid)
)
const deleteOldBackups = () =>
wrapTask(
{
logger,
message: 'clean',
parentId: taskId,
},
this._deleteVms(xapi, oldVms)
)
const deleteFirst = getSetting(settings, 'deleteFirst', [srId])
if (deleteFirst) {
await deleteOldBackups()
await this._deleteVms(xapi, oldVms)
}
const { vm } = await wrapTask(
@@ -1663,17 +1628,19 @@ export default class BackupNg {
)
await Promise.all([
vm.add_tags('Continuous Replication'),
xapi.addTag(vm.$ref, 'Continuous Replication'),
disableVmHighAvailability(xapi, vm),
vm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
vm.update_other_config('xo:backup:sr', srId),
xapi._updateObjectMapProperty(vm, 'blocked_operations', {
start:
'Start operation for this vm is blocked, clone it if you want to use it.',
}),
xapi._updateObjectMapProperty(vm, 'other_config', {
'xo:backup:sr': srId,
}),
])
if (!deleteFirst) {
await deleteOldBackups()
await this._deleteVms(xapi, oldVms)
}
}
)
@@ -1700,7 +1667,9 @@ export default class BackupNg {
message: 'set snapshot.other_config[xo:backup:exported]',
parentId: taskId,
},
snapshot.update_other_config('xo:backup:exported', 'true')
xapi._updateObjectMapProperty(snapshot, 'other_config', {
'xo:backup:exported': 'true',
})
)
}
@@ -1800,16 +1769,6 @@ export default class BackupNg {
const path = `${dir}/${file}`
try {
const metadata = JSON.parse(String(await handler.readFile(path)))
if (metadata.mode === 'full') {
metadata.size = await timeout
.call(
handler.getSize(resolveRelativeFromFile(path, metadata.xva)),
parseDuration(this._backupOptions.vmBackupSizeTimeout)
)
.catch(err => {
log.warn(`_listVmBackups, getSize`, { err })
})
}
if (predicate === undefined || predicate(metadata)) {
Object.defineProperty(metadata, '_filename', {
value: path,

View File

@@ -372,7 +372,7 @@ export default class {
const { datetime } = parseVmBackupPath(file)
await Promise.all([
vm.add_tags('restored from backup'),
xapi.addTag(vm.$id, 'restored from backup'),
xapi.editVm(vm.$id, {
name_label: `${vm.name_label} (${shortDate(datetime * 1e3)})`,
}),
@@ -456,9 +456,11 @@ export default class {
// (Asynchronously) Identify snapshot as future base.
promise
.then(() =>
srcVm.update_other_config(TAG_LAST_BASE_DELTA, delta.vm.uuid)
)
.then(() => {
return srcXapi._updateObjectMapProperty(srcVm, 'other_config', {
[TAG_LAST_BASE_DELTA]: delta.vm.uuid,
})
})
::ignoreErrors()
return promise
@@ -972,13 +974,12 @@ export default class {
nameLabel: copyName,
})
await Promise.all([
data.vm.add_tags('Disaster Recovery'),
data.vm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
])
targetXapi._updateObjectMapProperty(data.vm, 'blocked_operations', {
start:
'Start operation for this vm is blocked, clone it if you want to use it.',
})
await targetXapi.addTag(data.vm.$id, 'Disaster Recovery')
if (!deleteOldBackupsFirst) {
await this._removeVms(targetXapi, vmsToRemove)

View File

@@ -164,11 +164,11 @@ export default class Jobs {
xo.emit(
'job:terminated',
undefined,
job,
undefined,
// This cast can be removed after merging the PR: https://github.com/vatesfr/xen-orchestra/pull/3209
String(job.runId),
{
type: job.type,
}
String(job.runId)
)
return this.updateJob({ id: job.id, runId: null })
})
@@ -266,11 +266,6 @@ export default class Jobs {
reportWhen: (settings && settings.reportWhen) || 'failure',
}
}
if (type === 'metadataBackup') {
data = {
reportWhen: job.settings['']?.reportWhen ?? 'failure',
}
}
const logger = this._logger
const runJobId = logger.notice(`Starting execution of ${id}.`, {
@@ -319,10 +314,7 @@ export default class Jobs {
true
)
app.emit('job:terminated', runJobId, {
type: job.type,
status,
})
app.emit('job:terminated', status, job, schedule, runJobId)
} catch (error) {
await logger.error(
`The execution of ${id} has failed.`,
@@ -333,9 +325,7 @@ export default class Jobs {
},
true
)
app.emit('job:terminated', runJobId, {
type: job.type,
})
app.emit('job:terminated', undefined, job, schedule, runJobId)
throw error
} finally {
this.updateJob({ id, runId: null })::ignoreErrors()

View File

@@ -1,6 +1,7 @@
// @flow
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import debounceWithKey from '../_pDebounceWithKey'
@@ -24,14 +25,9 @@ const METADATA_BACKUP_JOB_TYPE = 'metadataBackup'
// Ascending comparator on the numeric `timestamp` property, for Array#sort.
const compareTimestamp = ({ timestamp: left }, { timestamp: right }) => left - right
const DEFAULT_RETENTION = 0
type ReportWhen = 'always' | 'failure' | 'never'
type Settings = {|
reportWhen?: ReportWhen,
retentionPoolMetadata?: number,
retentionXoMetadata?: number,
retentionPoolMetadata?: number,
|}
type MetadataBackupJob = {
@@ -43,20 +39,6 @@ type MetadataBackupJob = {
xoMetadata?: boolean,
}
// Logs a task that starts and immediately ends in failure: a `task.start`
// entry followed by a matching `task.end` entry with the serialized error.
const logInstantFailureTask = (logger, { data, error, message, parentId }) => {
  const startEntry = {
    data,
    event: 'task.start',
    parentId,
  }
  const taskId = logger.notice(message, startEntry)
  const endEntry = {
    event: 'task.end',
    result: serializeError(error),
    status: 'failure',
    taskId,
  }
  logger.error(message, endEntry)
}
const createSafeReaddir = (handler, methodName) => (path, options) =>
handler.list(path, options).catch(error => {
if (error?.code !== 'ENOENT') {
@@ -65,22 +47,6 @@ const createSafeReaddir = (handler, methodName) => (path, options) =>
return []
})
const deleteOldBackups = (handler, dir, retention, handleError) =>
handler.list(dir).then(list => {
list.sort()
list = list
.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp))
.slice(0, -retention)
return Promise.all(
list.map(timestamp => {
const backupDir = `${dir}/${timestamp}`
return handler
.rmtree(backupDir)
.catch(error => handleError(error, backupDir))
})
)
}, handleError)
// metadata.json
//
// {
@@ -110,14 +76,10 @@ const deleteOldBackups = (handler, dir, retention, handleError) =>
//
// Task logs emitted in a metadata backup execution:
//
// job.start(data: { reportWhen: ReportWhen })
// ├─ task.start(data: { type: 'pool', id: string, pool?: <Pool />, poolMaster?: <Host /> })
// │ ├─ task.start(data: { type: 'remote', id: string })
// │ │ └─ task.end
// job.start
// ├─ task.start(data: { type: 'pool', id: string, pool: <Pool />, poolMaster: <Host /> })
// │ └─ task.end
// ├─ task.start(data: { type: 'xo' })
// │ ├─ task.start(data: { type: 'remote', id: string })
// │ │ └─ task.end
// │ └─ task.end
// └─ job.end
export default class metadataBackup {
@@ -170,286 +132,6 @@ export default class metadataBackup {
})
}
// Backs up the XO configuration (`app.exportConfig()`) to all the given
// remotes.
//
// Task logs emitted under `runJobId`:
//   task.start(data: { type: 'xo' })
//   └─ one sub-task per remote (data: { type: 'remote', id: remoteId })
//
// This method never rejects: both per-remote and global failures are
// reported via `logger` `task.end` events with a `failure` status.
async _backupXo({ handlers, job, logger, retention, runJobId, schedule }) {
  const app = this._app
  const timestamp = Date.now()

  // root task of this XO metadata backup, parent of the per-remote sub-tasks
  const taskId = logger.notice(`Starting XO metadata backup. (${job.id})`, {
    data: {
      type: 'xo',
    },
    event: 'task.start',
    parentId: runJobId,
  })
  try {
    // layout on the remote: <DIR_XO_CONFIG_BACKUPS>/<scheduleId>/<timestamp>/
    const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
    const dir = `${scheduleDir}/${safeDateFormat(timestamp)}`

    // the configuration is exported once, then written to every remote
    const data = JSON.stringify(await app.exportConfig(), null, 2)
    const fileName = `${dir}/data.json`

    // metadata.json describes the backup (job/schedule identifiers, date)
    const metadata = JSON.stringify(
      {
        jobId: job.id,
        jobName: job.name,
        scheduleId: schedule.id,
        scheduleName: schedule.name,
        timestamp,
      },
      null,
      2
    )
    const metaDataFileName = `${dir}/metadata.json`

    // write to all remotes in parallel, one sub-task per remote
    await asyncMap(handlers, async (handler, remoteId) => {
      const subTaskId = logger.notice(
        `Starting XO metadata backup for the remote (${remoteId}). (${
          job.id
        })`,
        {
          data: {
            id: remoteId,
            type: 'remote',
          },
          event: 'task.start',
          parentId: taskId,
        }
      )
      try {
        await Promise.all([
          handler.outputFile(fileName, data),
          handler.outputFile(metaDataFileName, metadata),
        ])

        // apply the retention policy; list/delete failures are only reported
        // as warnings and do not fail the backup itself
        await deleteOldBackups(
          handler,
          scheduleDir,
          retention,
          (error, backupDir) => {
            logger.warning(
              backupDir !== undefined
                ? `unable to delete the folder ${backupDir}`
                : `unable to list backups for the remote (${remoteId})`,
              {
                event: 'task.warning',
                taskId: subTaskId,
                data: {
                  error,
                },
              }
            )
          }
        )
        logger.notice(
          `Backuping XO metadata for the remote (${remoteId}) is a success. (${
            job.id
          })`,
          {
            event: 'task.end',
            status: 'success',
            taskId: subTaskId,
          }
        )
      } catch (error) {
        // best-effort clean-up of the partially written backup directory
        await handler.rmtree(dir).catch(error => {
          logger.warning(`unable to delete the folder ${dir}`, {
            event: 'task.warning',
            taskId: subTaskId,
            data: {
              error,
            },
          })
        })

        // a failure on one remote does not abort the backup on the others
        logger.error(
          `Backuping XO metadata for the remote (${remoteId}) has failed. (${
            job.id
          })`,
          {
            event: 'task.end',
            result: serializeError(error),
            status: 'failure',
            taskId: subTaskId,
          }
        )
      }
    })

    // NOTE(review): the root task is reported successful even when some
    // per-remote sub-tasks failed, because their errors are caught above
    logger.notice(`Backuping XO metadata is a success. (${job.id})`, {
      event: 'task.end',
      status: 'success',
      taskId,
    })
  } catch (error) {
    logger.error(`Backuping XO metadata has failed. (${job.id})`, {
      event: 'task.end',
      result: serializeError(error),
      status: 'failure',
      taskId,
    })
  }
}
// Backs up the metadata of the pool `poolId` (`xapi.exportPoolMetadata()`)
// to all the given remotes.
//
// Task logs emitted under `runJobId`:
//   task.start(data: { type: 'pool', id, pool, poolMaster })
//   └─ one sub-task per remote (data: { type: 'remote', id: remoteId })
//
// This method never rejects: both per-remote and global failures are
// reported via `logger` `task.end` events with a `failure` status.
async _backupPool(
  poolId,
  { cancelToken, handlers, job, logger, retention, runJobId, schedule, xapi }
) {
  // the pool master record is only informative: ignore resolution errors
  const poolMaster = await xapi
    .getRecord('host', xapi.pool.master)
    ::ignoreErrors()
  const timestamp = Date.now()

  // root task of this pool metadata backup, parent of the per-remote
  // sub-tasks
  const taskId = logger.notice(
    `Starting metadata backup for the pool (${poolId}). (${job.id})`,
    {
      data: {
        id: poolId,
        pool: xapi.pool,
        poolMaster,
        type: 'pool',
      },
      event: 'task.start',
      parentId: runJobId,
    }
  )
  try {
    // layout on the remote:
    // <DIR_XO_POOL_METADATA_BACKUPS>/<scheduleId>/<poolId>/<timestamp>/
    const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${poolId}`
    const dir = `${poolDir}/${safeDateFormat(timestamp)}`

    // TODO: export the metadata only once then split the stream between remotes
    const stream = await xapi.exportPoolMetadata(cancelToken)
    const fileName = `${dir}/data`

    // metadata.json describes the backup (job/schedule identifiers, pool,
    // date)
    const metadata = JSON.stringify(
      {
        jobId: job.id,
        jobName: job.name,
        pool: xapi.pool,
        poolMaster,
        scheduleId: schedule.id,
        scheduleName: schedule.name,
        timestamp,
      },
      null,
      2
    )
    const metaDataFileName = `${dir}/metadata.json`

    // write to all remotes in parallel, one sub-task per remote
    await asyncMap(handlers, async (handler, remoteId) => {
      const subTaskId = logger.notice(
        `Starting metadata backup for the pool (${poolId}) for the remote (${remoteId}). (${
          job.id
        })`,
        {
          data: {
            id: remoteId,
            type: 'remote',
          },
          event: 'task.start',
          parentId: taskId,
        }
      )

      // declared outside the `try` so the `catch` clause can destroy it
      let outputStream
      try {
        await Promise.all([
          (async () => {
            outputStream = await handler.createOutputStream(fileName)

            // 'readable-stream/pipeline' does not call its callback when an
            // error is thrown by the readable stream, hence the manual
            // `pipe()` plus explicit wait for the 'end' event
            stream.pipe(outputStream)
            return fromEvent(stream, 'end').catch(error => {
              // an 'aborted' error is expected on cancellation: swallow it
              if (error.message !== 'aborted') {
                throw error
              }
            })
          })(),
          handler.outputFile(metaDataFileName, metadata),
        ])

        // apply the retention policy; list/delete failures are only reported
        // as warnings and do not fail the backup itself
        await deleteOldBackups(
          handler,
          poolDir,
          retention,
          (error, backupDir) => {
            logger.warning(
              backupDir !== undefined
                ? `unable to delete the folder ${backupDir}`
                : `unable to list backups for the remote (${remoteId})`,
              {
                event: 'task.warning',
                taskId: subTaskId,
                data: {
                  error,
                },
              }
            )
          }
        )
        logger.notice(
          `Backuping pool metadata (${poolId}) for the remote (${remoteId}) is a success. (${
            job.id
          })`,
          {
            event: 'task.end',
            status: 'success',
            taskId: subTaskId,
          }
        )
      } catch (error) {
        if (outputStream !== undefined) {
          outputStream.destroy()
        }

        // best-effort clean-up of the partially written backup directory
        await handler.rmtree(dir).catch(error => {
          logger.warning(`unable to delete the folder ${dir}`, {
            event: 'task.warning',
            taskId: subTaskId,
            data: {
              error,
            },
          })
        })

        // a failure on one remote does not abort the backup on the others
        logger.error(
          `Backuping pool metadata (${poolId}) for the remote (${remoteId}) has failed. (${
            job.id
          })`,
          {
            event: 'task.end',
            result: serializeError(error),
            status: 'failure',
            taskId: subTaskId,
          }
        )
      }
    })

    // NOTE(review): the root task is reported successful even when some
    // per-remote sub-tasks failed, because their errors are caught above
    logger.notice(
      `Backuping pool metadata (${poolId}) is a success. (${job.id})`,
      {
        event: 'task.end',
        status: 'success',
        taskId,
      }
    )
  } catch (error) {
    logger.error(
      `Backuping pool metadata (${poolId}) has failed. (${job.id})`,
      {
        event: 'task.end',
        result: serializeError(error),
        status: 'failure',
        taskId,
      }
    )
  }
}
async _executor({
cancelToken,
job: job_,
@@ -473,102 +155,199 @@ export default class metadataBackup {
throw new Error('no metadata mode found')
}
let { retentionXoMetadata, retentionPoolMetadata } =
job.settings[schedule.id] || {}
const app = this._app
const { retentionXoMetadata, retentionPoolMetadata } =
job?.settings[schedule.id] || {}
// it also replaces null retentions introduced by the commit
// https://github.com/vatesfr/xen-orchestra/commit/fea5117ed83b58d3a57715b32d63d46e3004a094#diff-c02703199db2a4c217943cf8e02b91deR40
if (retentionXoMetadata == null) {
retentionXoMetadata = DEFAULT_RETENTION
}
if (retentionPoolMetadata == null) {
retentionPoolMetadata = DEFAULT_RETENTION
const timestamp = Date.now()
const formattedTimestamp = safeDateFormat(timestamp)
const commonMetadata = {
jobId: job.id,
jobName: job.name,
scheduleId: schedule.id,
scheduleName: schedule.name,
timestamp,
}
if (
(retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
(!job.xoMetadata && retentionPoolMetadata === 0) ||
(isEmptyPools && retentionXoMetadata === 0)
) {
const files = []
if (job.xoMetadata && retentionXoMetadata > 0) {
const taskId = logger.notice(`Starting XO metadata backup. (${job.id})`, {
data: {
type: 'xo',
},
event: 'task.start',
parentId: runJobId,
})
const xoMetadataDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
const dir = `${xoMetadataDir}/${formattedTimestamp}`
const data = JSON.stringify(await app.exportConfig(), null, 2)
const fileName = `${dir}/data.json`
const metadata = JSON.stringify(commonMetadata, null, 2)
const metaDataFileName = `${dir}/metadata.json`
files.push({
executeBackup: defer(($defer, handler) => {
$defer.onFailure(() => handler.rmtree(dir))
return Promise.all([
handler.outputFile(fileName, data),
handler.outputFile(metaDataFileName, metadata),
]).then(
result => {
logger.notice(`Backuping XO metadata is a success. (${job.id})`, {
event: 'task.end',
status: 'success',
taskId,
})
return result
},
error => {
logger.notice(`Backuping XO metadata has failed. (${job.id})`, {
event: 'task.end',
result: serializeError(error),
status: 'failure',
taskId,
})
throw error
}
)
}),
dir: xoMetadataDir,
retention: retentionXoMetadata,
})
}
if (!isEmptyPools && retentionPoolMetadata > 0) {
files.push(
...(await Promise.all(
poolIds.map(async id => {
const xapi = this._app.getXapi(id)
const poolMaster = await xapi.getRecord('host', xapi.pool.master)
const taskId = logger.notice(
`Starting metadata backup for the pool (${id}). (${job.id})`,
{
data: {
id,
pool: xapi.pool,
poolMaster,
type: 'pool',
},
event: 'task.start',
parentId: runJobId,
}
)
const poolMetadataDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${
schedule.id
}/${id}`
const dir = `${poolMetadataDir}/${formattedTimestamp}`
// TODO: export the metadata only once then split the stream between remotes
const stream = await app.getXapi(id).exportPoolMetadata(cancelToken)
const fileName = `${dir}/data`
const metadata = JSON.stringify(
{
...commonMetadata,
pool: xapi.pool,
poolMaster,
},
null,
2
)
const metaDataFileName = `${dir}/metadata.json`
return {
executeBackup: defer(($defer, handler) => {
$defer.onFailure(() => handler.rmtree(dir))
return Promise.all([
(async () => {
const outputStream = await handler.createOutputStream(
fileName
)
$defer.onFailure(() => outputStream.destroy())
// 'readable-stream/pipeline' does not call its callback when an error is
// thrown by the readable stream
stream.pipe(outputStream)
return fromEvent(stream, 'end').catch(error => {
if (error.message !== 'aborted') {
throw error
}
})
})(),
handler.outputFile(metaDataFileName, metadata),
]).then(
result => {
logger.notice(
`Backuping pool metadata (${id}) is a success. (${
job.id
})`,
{
event: 'task.end',
status: 'success',
taskId,
}
)
return result
},
error => {
logger.notice(
`Backuping pool metadata (${id}) has failed. (${job.id})`,
{
event: 'task.end',
result: serializeError(error),
status: 'failure',
taskId,
}
)
throw error
}
)
}),
dir: poolMetadataDir,
retention: retentionPoolMetadata,
}
})
))
)
}
if (files.length === 0) {
throw new Error('no retentions corresponding to the metadata modes found')
}
cancelToken.throwIfRequested()
const app = this._app
const timestampReg = /^\d{8}T\d{6}Z$/
return asyncMap(
// TODO: emit a warning task if a remote is broken
asyncMap(remoteIds, id => app.getRemoteHandler(id)::ignoreErrors()),
async handler => {
if (handler === undefined) {
return
}
const handlers = {}
await Promise.all(
remoteIds.map(id =>
app.getRemoteHandler(id).then(
handler => {
handlers[id] = handler
},
error => {
logger.warning(`unable to get the handler for the remote (${id})`, {
event: 'task.warning',
taskId: runJobId,
data: {
error,
},
await Promise.all(
files.map(async ({ executeBackup, dir, retention }) => {
await executeBackup(handler)
// deleting old backups
await handler.list(dir).then(list => {
list.sort()
list = list
.filter(timestampDir => timestampReg.test(timestampDir))
.slice(0, -retention)
return Promise.all(
list.map(timestampDir =>
handler.rmtree(`${dir}/${timestampDir}`)
)
)
})
}
)
)
)
if (Object.keys(handlers).length === 0) {
return
}
const promises = []
if (job.xoMetadata && retentionXoMetadata !== 0) {
promises.push(
this._backupXo({
handlers,
job,
logger,
retention: retentionXoMetadata,
runJobId,
schedule,
})
)
}
if (!isEmptyPools && retentionPoolMetadata !== 0) {
poolIds.forEach(id => {
let xapi
try {
xapi = this._app.getXapi(id)
} catch (error) {
logInstantFailureTask(logger, {
data: {
type: 'pool',
id,
},
error,
message: `unable to get the xapi associated to the pool (${id})`,
parentId: runJobId,
})
}
if (xapi !== undefined) {
promises.push(
this._backupPool(id, {
cancelToken,
handlers,
job,
logger,
retention: retentionPoolMetadata,
runJobId,
schedule,
xapi,
})
)
}
})
}
return Promise.all(promises)
)
}
)
}
async createMetadataBackupJob(

View File

@@ -67,7 +67,7 @@ export default class {
const handlers = this._handlers
let handler = handlers[id]
if (handler === undefined) {
handler = getHandler(remote, this._remoteOptions)
handler = handlers[id] = getHandler(remote, this._remoteOptions)
try {
await handler.sync()
@@ -76,8 +76,6 @@ export default class {
ignoreErrors.call(this._updateRemote(id, { error: error.message }))
throw error
}
handlers[id] = handler
}
return handler
@@ -170,7 +168,7 @@ export default class {
}
@synchronized()
async _updateRemote(id, { url, ...props }) {
async _updateRemote(id, { benchmarks, url, ...props }) {
const remote = await this._getRemote(id)
// url is handled separately to take care of obfuscated values
@@ -178,6 +176,13 @@ export default class {
remote.url = format(sensitiveValues.merge(parse(url), parse(remote.url)))
}
if (
benchmarks !== undefined ||
(benchmarks = remote.benchmarks) !== undefined
) {
remote.benchmarks = JSON.stringify(benchmarks)
}
patch(remote, props)
return (await this._remotes.update(remote)).properties

View File

@@ -4,7 +4,6 @@ import { ignoreErrors } from 'promise-toolbox'
import { hash, needsRehash, verify } from 'hashy'
import { invalidCredentials, noSuchObject } from 'xo-common/api-errors'
import * as XenStore from '../_XenStore'
import { Groups } from '../models/group'
import { Users } from '../models/user'
import { forEach, isEmpty, lightSet, mapToArray } from '../utils'
@@ -69,12 +68,8 @@ export default class {
)
if (!(await usersDb.exists())) {
const {
email = 'admin@admin.net',
password = 'admin',
} = await XenStore.read('vm-data/admin-account')
.then(JSON.parse)
.catch(() => ({}))
const email = 'admin@admin.net'
const password = 'admin'
await this.createUser({ email, password, permission: 'admin' })
log.info(`Default user created: ${email} with password ${password}`)

View File

@@ -4,7 +4,6 @@ import { fibonacci } from 'iterable-backoff'
import { noSuchObject } from 'xo-common/api-errors'
import { pDelay, ignoreErrors } from 'promise-toolbox'
import * as XenStore from '../_XenStore'
import Xapi from '../xapi'
import xapiObjectToXo from '../xapi-object-to-xo'
import XapiStats from '../xapi-stats'
@@ -65,19 +64,8 @@ export default class {
servers => serversDb.update(servers)
)
const servers = await serversDb.get()
// Add servers in XenStore
if (servers.length === 0) {
const xenStoreServers = await XenStore.read('vm-data/xen-servers')
.then(JSON.parse)
.catch(() => [])
for (const server of xenStoreServers) {
servers.push(await this.registerXenServer(server))
}
}
// Connects to existing servers.
const servers = await serversDb.get()
for (const server of servers) {
if (server.enabled) {
this.connectXenServer(server.id).catch(error => {
@@ -386,12 +374,14 @@ export default class {
return value && JSON.parse(value)
},
setData: async (id, key, value) => {
await xapi
.getObject(id)
.update_other_config(
`xo:${camelToSnakeCase(key)}`,
value !== null ? JSON.stringify(value) : value
)
await xapi._updateObjectMapProperty(
xapi.getObject(id),
'other_config',
{
[`xo:${camelToSnakeCase(key)}`]:
value !== null ? JSON.stringify(value) : value,
}
)
// Register the updated object.
addObject(await xapi._waitObject(id))
@@ -455,11 +445,6 @@ export default class {
return xapi
}
// returns the XAPI object corresponding to an XO object
getXapiObject(xoObject) {
return this.getXapi(xoObject).getObjectByRef(xoObject._xapiRef)
}
_getXenServerStatus(id) {
const xapi = this._xapis[id]
return xapi === undefined

Some files were not shown because too many files have changed in this diff Show More