Compare commits

180 Commits

xen-api-Si ... xo-server-

| Author | SHA1 | Date |
| --- | --- | --- |
|  | 1e50dab093 |  |
|  | d1935bf778 |  |
|  | 70a346d11e |  |
|  | fd39a2063d |  |
|  | 682512fffe |  |
|  | b13f91ec8d |  |
|  | a140fc09ac |  |
|  | f403a7e753 |  |
|  | 033d784c52 |  |
|  | 62c3fa13ca |  |
|  | ce338cb6ca |  |
|  | 8782151c5d |  |
|  | b22c74c5a8 |  |
|  | a3e4253005 |  |
|  | 2388593b8a |  |
|  | cdced63c1b |  |
|  | 45e1d1ecef |  |
|  | f44447ce71 |  |
|  | 238e9cd8cc |  |
|  | e171d8ed0e |  |
|  | bd3399e04b |  |
|  | 2b4443f333 |  |
|  | ab6548122f |  |
|  | f81573d999 |  |
|  | 84ccebb858 |  |
|  | 530bc50e7c |  |
|  | 57e490fc23 |  |
|  | 61e902c094 |  |
|  | 8378ba77d6 |  |
|  | c9e30b74e2 |  |
|  | af944fd2e3 |  |
|  | bcc0e76f1d |  |
|  | 95078d250a |  |
|  | 4b16a2c0c5 |  |
|  | b8524732ce |  |
|  | d641d35d5c |  |
|  | 7464d95b57 |  |
|  | 3d6aa667fe |  |
|  | 147c3d2e7b |  |
|  | ac298c3be3 |  |
|  | e88848c44a |  |
|  | cd518e3e4c |  |
|  | 24d4fad394 |  |
|  | 6d8785e689 |  |
|  | 508cbf0a82 |  |
|  | c83f56166d |  |
|  | 7199e1a214 |  |
|  | cc2c71c076 |  |
|  | 9ca273b2c4 |  |
|  | b85c2f35b6 |  |
|  | fdd79885f9 |  |
|  | b2eb970796 |  |
|  | 3ee9c1b550 |  |
|  | 2566c24753 |  |
|  | 49e1b0ba7e |  |
|  | 453c329f14 |  |
|  | 27193f38f3 |  |
|  | d3dc94e210 |  |
|  | 6dad860635 |  |
|  | 0362ac8909 |  |
|  | e7b79f83d1 |  |
|  | 62379c1e41 |  |
|  | 23b422e3df |  |
|  | f8e6dee635 |  |
|  | c8e9b287f4 |  |
|  | c9412dbcd0 |  |
|  | 77222e9e6b |  |
|  | 9d0f24eae1 |  |
|  | 6e527947be |  |
|  | e7051c1129 |  |
|  | 3196c7ca09 |  |
|  | 0e1e32d241 |  |
|  | a34912fb0d |  |
|  | c7c6e0e2ff |  |
|  | 1e529c995a |  |
|  | 7be1c7a47b |  |
|  | b17380443b |  |
|  | 59e68682bd |  |
|  | b7a92cfe92 |  |
|  | 5ebe27da49 |  |
|  | 42df6ba6fa |  |
|  | 8210fddfab |  |
|  | f55ed273c5 |  |
|  | d67e95af7b |  |
|  | 0b0f235252 |  |
|  | 36a5f52068 |  |
|  | 31266728f7 |  |
|  | 8c79ea4ce3 |  |
|  | c73a4204cb |  |
|  | 0b3c2cc252 |  |
|  | 2bd3ca1d0b |  |
|  | ce8649d991 |  |
|  | 9bd563b111 |  |
|  | 6ceb924a85 |  |
|  | c2ef0ded43 |  |
|  | 6081a6f6db |  |
|  | a0d92a0b1d |  |
|  | 3cf1f7ede2 |  |
|  | 5757afa1d8 |  |
|  | 86e9b9c1b8 |  |
|  | 1cdd1fa00e |  |
|  | 9d12759c68 |  |
|  | 594341fab6 |  |
|  | 4e88125cbe |  |
|  | 13237180a2 |  |
|  | f64d7e0b6e |  |
|  | 040a6930a4 |  |
|  | c54b9189a6 |  |
|  | 8882f1b019 |  |
|  | ae6416c4d2 |  |
|  | 8faed87656 |  |
|  | 0983f05969 |  |
|  | d43e2544a1 |  |
|  | ca83d11ac8 |  |
|  | 1cdcdd9b5f |  |
|  | cc7806e35b |  |
|  | 0ee48b6623 |  |
|  | 8c02e0efbd |  |
|  | 34d3ca82bc |  |
|  | 43822d3667 |  |
|  | f4ac73b3b4 |  |
|  | f084b6def9 |  |
|  | a00d101ff7 |  |
|  | 9d5900d9b6 |  |
|  | 28fb4e8216 |  |
|  | bec4dbe652 |  |
|  | 72cc14f508 |  |
|  | d20941cc2c |  |
|  | 9cb8a05316 |  |
|  | dccd799f6d |  |
|  | b42b3d1b01 |  |
|  | a40d6f772a |  |
|  | 6e9bfd18d9 |  |
|  | 3b92dd0139 |  |
|  | 564d53610a |  |
|  | b4c7b8ac7f |  |
|  | 7acd90307b |  |
|  | d3ec76c19f |  |
|  | 688cb20674 |  |
|  | c63be20bea |  |
|  | df36633223 |  |
|  | 3597621d88 |  |
|  | 8387684839 |  |
|  | f261f395f1 |  |
|  | f27170ff0e |  |
|  | d82c951db6 |  |
|  | 41ca853e03 |  |
|  | a08d098265 |  |
|  | 875681b8ce |  |
|  | a03dcbbf55 |  |
|  | 97cabbbc69 |  |
|  | 13725a9e21 |  |
|  | f47df961f7 |  |
|  | 2f644d5eeb |  |
|  | 4b292bb78c |  |
|  | 804891cc81 |  |
|  | d335e06371 |  |
|  | 477058ad23 |  |
|  | eb3b68401d |  |
|  | 865d2df124 |  |
|  | 88160bae1d |  |
|  | f581e93b88 |  |
|  | 21a7cf7158 |  |
|  | 5edee4bae0 |  |
|  | 916ca5576a |  |
|  | 6c861bfd1f |  |
|  | 56961b55bd |  |
|  | cdcd7154ba |  |
|  | 654a2ee870 |  |
|  | 903634073a |  |
|  | 0d4818feb6 |  |
|  | d6aa40679b |  |
|  | b7cc31c94d |  |
|  | 6860156b6f |  |
|  | 29486c9ce2 |  |
|  | 7cfa6a5da4 |  |
|  | 2563be472b |  |
|  | 7289e856d9 |  |
|  | 975de1954e |  |
|  | 95bcf0c080 |  |
@@ -1,5 +1,7 @@
module.exports = {
  extends: [
    'plugin:eslint-comments/recommended',
    'standard',
    'standard-jsx',
    'prettier',
@@ -19,7 +21,7 @@ module.exports = {
  overrides: [
    {
      files: ['packages/*cli*/**/*.js', '*-cli.js'],
      files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
      rules: {
        'no-console': 'off',
      },
@@ -33,6 +35,9 @@ module.exports = {
    },
  },
  rules: {
    // disabled because XAPI objects are using camel case
    camelcase: ['off'],
    'no-console': ['error', { allow: ['warn', 'error'] }],
    'no-var': 'error',
    'node/no-extraneous-import': 'error',
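For orientation (not part of the diff): a small sketch of what the override and the base `no-console` rule above mean in practice. The file paths are hypothetical and only illustrate which rule set applies where.

```js
// packages/example-cli/cli.js — matched by the override's globs, so `no-console` is off
console.log('usage: example-cli <command>') // allowed

// packages/example/src/index.js — only the base rules apply here
console.log('debug output') // error: the base rule only allows warn/error
console.warn('falling back to defaults') // allowed
console.error('unrecoverable error') // allowed
```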
@@ -46,6 +46,7 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,6 +16,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.4.1",
|
||||
"xen-api": "^0.25.0"
|
||||
"xen-api": "^0.25.2"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,6 +55,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "0.8.0",
|
||||
"version": "0.9.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"keywords": [],
|
||||
@@ -21,18 +21,18 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@marsaud/smb2": "^0.13.0",
|
||||
"@marsaud/smb2": "^0.14.0",
|
||||
"@sindresorhus/df": "^2.1.0",
|
||||
"@xen-orchestra/async-map": "^0.0.0",
|
||||
"decorator-synchronized": "^0.5.0",
|
||||
"execa": "^1.0.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^4.0.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"readable-stream": "^3.0.6",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.0.33",
|
||||
"tmp": "^0.1.0",
|
||||
"xo-remote-parser": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -45,7 +45,7 @@
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"dotenv": "^7.0.0",
|
||||
"dotenv": "^8.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"rimraf": "^2.6.2"
|
||||
},
|
||||
@@ -55,6 +55,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -400,6 +400,10 @@ export default class RemoteHandlerAbstract {
    }
  }

  async truncate(file: string, len: number): Promise<void> {
    await this._truncate(file, len)
  }

  async unlink(file: string, { checksum = true }: Object = {}): Promise<void> {
    file = normalizePath(file)

@@ -410,6 +414,18 @@ RemoteHandlerAbstract {
    await this._unlink(file).catch(ignoreEnoent)
  }

  async write(
    file: File,
    buffer: Buffer,
    position: number
  ): Promise<{| bytesWritten: number, buffer: Buffer |}> {
    await this._write(
      typeof file === 'string' ? normalizePath(file) : file,
      buffer,
      position
    )
  }

  async writeFile(
    file: string,
    data: Data,

@@ -546,6 +562,28 @@ RemoteHandlerAbstract {
    throw new Error('Not implemented')
  }

  async _write(file: File, buffer: Buffer, position: number): Promise<void> {
    const isPath = typeof file === 'string'
    if (isPath) {
      file = await this.openFile(file, 'r+')
    }
    try {
      return await this._writeFd(file, buffer, position)
    } finally {
      if (isPath) {
        await this.closeFile(file)
      }
    }
  }

  async _writeFd(
    fd: FileDescriptor,
    buffer: Buffer,
    position: number
  ): Promise<void> {
    throw new Error('Not implemented')
  }

  async _writeFile(
    file: string,
    data: Data,
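For context (not part of the diff), a minimal usage sketch of the `truncate()` and `write()` methods added above, assuming a `file://` remote obtained through `getHandler` as in the test file that follows; the remote URL and file name are illustrative only.

```js
// Hypothetical sketch: in-place patching with the new RemoteHandlerAbstract API.
import { getHandler } from '@xen-orchestra/fs'

async function main() {
  const handler = getHandler({ url: 'file:///tmp/xo-fs-demo' })
  await handler.sync() // set up the remote

  await handler.outputFile('demo-file', Buffer.from('hello, xen orchestra'))

  // Overwrite 5 bytes at offset 7 without rewriting the whole file; when given
  // a path, write() opens it with flag 'r+', delegates to _writeFd(), then closes it.
  await handler.write('demo-file', Buffer.from('WORLD'), 7)

  // Shrink the file to an exact byte length via the new _truncate() hook.
  await handler.truncate('demo-file', 12)

  console.log(String(await handler.readFile('demo-file'))) // 'hello, WORLD'

  await handler.forget() // tear down the remote
}

main().catch(console.error)
```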
@@ -3,9 +3,9 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { random } from 'lodash'
import { tmpdir } from 'os'

import { getHandler } from '.'

@@ -310,5 +310,70 @@ handlers.forEach(url => {
      await handler.unlink('file')
    })
  })

  describe('#write()', () => {
    beforeEach(() => handler.outputFile('file', TEST_DATA))

    const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
    const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)

    forOwn(
      {
        'dont increase file size': (() => {
          const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)

          const expected = Buffer.from(TEST_DATA)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
        'increase file size': (() => {
          const offset = random(
            TEST_DATA_LEN - PATCH_DATA_LEN + 1,
            TEST_DATA_LEN
          )

          const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
          TEST_DATA.copy(expected)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
      },
      ({ offset, expected }, title) => {
        describe(title, () => {
          testWithFileDescriptor('file', 'r+', async ({ file }) => {
            await handler.write(file, PATCH_DATA, offset)
            await expect(await handler.readFile('file')).toEqual(expected)
          })
        })
      }
    )
  })

  describe('#truncate()', () => {
    forOwn(
      {
        'shrinks file': (() => {
          const length = random(0, TEST_DATA_LEN)
          const expected = TEST_DATA.slice(0, length)
          return { length, expected }
        })(),
        'grows file': (() => {
          const length = random(TEST_DATA_LEN, TEST_DATA_LEN * 2)
          const expected = Buffer.alloc(length)
          TEST_DATA.copy(expected)
          return { length, expected }
        })(),
      },
      ({ length, expected }, title) => {
        it(title, async () => {
          await handler.outputFile('file', TEST_DATA)
          await handler.truncate('file', length)
          await expect(await handler.readFile('file')).toEqual(expected)
        })
      }
    )
  })
})
})
@@ -106,10 +106,18 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    await fs.access(path, fs.R_OK | fs.W_OK)
  }

  _truncate(file, len) {
    return fs.truncate(this._getFilePath(file), len)
  }

  async _unlink(file) {
    return fs.unlink(this._getFilePath(file))
  }

  _writeFd(file, buffer, position) {
    return fs.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, { flags }) {
    return fs.writeFile(this._getFilePath(file), data, { flag: flags })
  }

@@ -155,10 +155,20 @@ export default class SmbHandler extends RemoteHandlerAbstract {
    return this.list('.')
  }

  _truncate(file, len) {
    return this._client
      .truncate(this._getFilePath(file), len)
      .catch(normalizeError)
  }

  _unlink(file) {
    return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
  }

  _writeFd(file, buffer, position) {
    return this._client.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, options) {
    return this._client
      .writeFile(this._getFilePath(file), data, options)
@@ -48,6 +48,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
CHANGELOG.md (105 changed lines)

@@ -1,5 +1,110 @@
# ChangeLog

## **next**

### Enhancements

- [VM/Advanced] Ability to use UEFI instead of BIOS [#4264](https://github.com/vatesfr/xen-orchestra/issues/4264) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4268))

### Bug fixes

- [XOA] Don't require editing the _email_ field in case of re-registration (PR [#4259](https://github.com/vatesfr/xen-orchestra/pull/4259))

### Released packages

- xen-api v0.25.2
- xo-server v5.43.0
- xo-web v5.43.0

## **5.35.0** (2019-05-29)
|
||||
|
||||

|
||||
|
||||
### Enhancements
|
||||
|
||||
- [VM/general] Display 'Started... ago' instead of 'Halted... ago' for paused state [#3750](https://github.com/vatesfr/xen-orchestra/issues/3750) (PR [#4170](https://github.com/vatesfr/xen-orchestra/pull/4170))
|
||||
- [Metadata backup] Ability to define when the backup report will be sent (PR [#4149](https://github.com/vatesfr/xen-orchestra/pull/4149))
|
||||
- [XOA/Update] Ability to select release channel [#4200](https://github.com/vatesfr/xen-orchestra/issues/4200) (PR [#4202](https://github.com/vatesfr/xen-orchestra/pull/4202))
|
||||
- [User] Forget connection tokens on password change or on demand [#4214](https://github.com/vatesfr/xen-orchestra/issues/4214) (PR [#4224](https://github.com/vatesfr/xen-orchestra/pull/4224))
|
||||
- [Settings/Logs] LICENCE_RESTRICTION errors: suggest XCP-ng as an Open Source alternative [#3876](https://github.com/vatesfr/xen-orchestra/issues/3876) (PR [#4238](https://github.com/vatesfr/xen-orchestra/pull/4238))
|
||||
- [VM/Migrate] Display VDI size on migrate modal [#2534](https://github.com/vatesfr/xen-orchestra/issues/2534) (PR [#4250](https://github.com/vatesfr/xen-orchestra/pull/4250))
|
||||
- [Host] Display hyperthreading status on advanced tab [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4263](https://github.com/vatesfr/xen-orchestra/pull/4263))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
|
||||
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
|
||||
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))
|
||||
- [Charts] Fixed the chart lines sometimes changing order/color (PR [#4221](https://github.com/vatesfr/xen-orchestra/pull/4221))
|
||||
- Prevent non-admin users to access admin pages with URL (PR [#4220](https://github.com/vatesfr/xen-orchestra/pull/4220))
|
||||
- [Upgrade] Fix alert before upgrade while running backup jobs [#4164](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4235))
|
||||
- [Import] Fix import OVA files (PR [#4232](https://github.com/vatesfr/xen-orchestra/pull/4232))
|
||||
- [VM/network] Fix duplicate IPv4 (PR [#4239](https://github.com/vatesfr/xen-orchestra/pull/4239))
|
||||
- [Remotes] Fix disconnected remotes which may appear to work
|
||||
- [Host] Fix incorrect hypervisor name [#4246](https://github.com/vatesfr/xen-orchestra/issues/4246) (PR [#4248](https://github.com/vatesfr/xen-orchestra/pull/4248))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-backup-reports v0.16.1
|
||||
- @xen-orchestra/fs v0.9.0
|
||||
- vhd-lib v0.7.0
|
||||
- xo-server v5.42.1
|
||||
- xo-web v5.42.1
|
||||
|
||||
## **5.34.0** (2019-04-30)
|
||||
|
||||

|
||||
|
||||
### Highlights
|
||||
|
||||
- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
|
||||
- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
|
||||
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
|
||||
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
|
||||
- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
|
||||
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
|
||||
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
|
||||
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
|
||||
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
|
||||
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
|
||||
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
|
||||
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
|
||||
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
|
||||
- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
|
||||
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
|
||||
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
|
||||
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
|
||||
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
|
||||
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))
|
||||
|
||||
### Released packages
|
||||
|
||||
- xo-server-backup-reports v0.16.0
|
||||
- complex-matcher v0.6.0
|
||||
- xo-vmdk-to-vhd v0.1.7
|
||||
- vhd-lib v0.6.1
|
||||
- xo-server v5.40.0
|
||||
- xo-web v5.40.1
|
||||
|
||||
## **5.33.1** (2019-04-04)
|
||||
|
||||
### Bug fix
|
||||
|
||||
- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)
|
||||
|
||||
### Released packages
|
||||
|
||||
- xen-api v0.25.1
|
||||
- xo-server v5.38.2
|
||||
|
||||
## **5.33.0** (2019-03-29)
|
||||
|
||||
### Enhancements
|
||||
|
||||
@@ -2,15 +2,31 @@
|
||||
|
||||
### Enhancements
|
||||
|
||||
- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
|
||||
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
|
||||
- [Backup-ng/restore] Display size for full VM backup [#4009](https://github.com/vatesfr/xen-orchestra/issues/4009) (PR [#4245](https://github.com/vatesfr/xen-orchestra/pull/4245))
|
||||
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
|
||||
- [auth-saml] Improve compatibility with Microsoft Azure Active Directory (PR [#4294](https://github.com/vatesfr/xen-orchestra/pull/4294))
|
||||
- [Host] Display warning when "Citrix Hypervisor" license has restrictions [#4251](https://github.com/vatesfr/xen-orchestra/issues/4164) (PR [#4235](https://github.com/vatesfr/xen-orchestra/pull/4279))
|
||||
- [VM/Backup] Create backup bulk action [#2573](https://github.com/vatesfr/xen-orchestra/issues/2573) (PR [#4257](https://github.com/vatesfr/xen-orchestra/pull/4257))
|
||||
- [Sr/new] Ability to select NFS version when creating NFS storage [#3951](https://github.com/vatesfr/xen-orchestra/issues/#3951) (PR [#4277](https://github.com/vatesfr/xen-orchestra/pull/4277))
|
||||
- [SR/new] Create ZFS storage [#4260](https://github.com/vatesfr/xen-orchestra/issues/4260) (PR [#4266](https://github.com/vatesfr/xen-orchestra/pull/4266))
|
||||
- [Host] Display warning when host's time differs too much from XOA's time [#4113](https://github.com/vatesfr/xen-orchestra/issues/4113) (PR [#4173](https://github.com/vatesfr/xen-orchestra/pull/4173))
|
||||
- [Host/storages, SR/hosts] Display PBD details [#4264](https://github.com/vatesfr/xen-orchestra/issues/4161) (PR [#4268](https://github.com/vatesfr/xen-orchestra/pull/4284))
|
||||
- [VM/network] Display and set bandwidth rate-limit of a VIF [#4215](https://github.com/vatesfr/xen-orchestra/issues/4215) (PR [#4293](https://github.com/vatesfr/xen-orchestra/pull/4293))
|
||||
- [SDN Controller] New plugin which enables creating pool-wide private networks [xcp-ng/xcp#175](https://github.com/xcp-ng/xcp/issues/175) (PR [#4269](https://github.com/vatesfr/xen-orchestra/pull/4269))
|
||||
|
||||
### Bug fixes
|
||||
|
||||
- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
|
||||
- [Metadata backup] Missing XAPIs should trigger a failure job [#4281](https://github.com/vatesfr/xen-orchestra/issues/4281) (PR [#4283](https://github.com/vatesfr/xen-orchestra/pull/4283))
|
||||
- [Host/advanced] Fix host CPU hyperthreading detection [#4262](https://github.com/vatesfr/xen-orchestra/issues/4262) (PR [#4285](https://github.com/vatesfr/xen-orchestra/pull/4285))
|
||||
- [iSCSI] Fix fibre channel paths display [#4291](https://github.com/vatesfr/xen-orchestra/issues/4291) (PR [#4303](https://github.com/vatesfr/xen-orchestra/pull/4303))
|
||||
- [New VM] Fix tooltips not displayed on disabled elements in some browsers (e.g. Google Chrome) [#4304](https://github.com/vatesfr/xen-orchestra/issues/4304) (PR [#4309](https://github.com/vatesfr/xen-orchestra/pull/4309))
|
||||
|
||||
### Released packages
|
||||
|
||||
- vhd-lib v0.6.1
|
||||
- xo-server v5.39.0
|
||||
- xo-web v5.39.0
|
||||
- xo-server-auth-ldap v0.6.5
|
||||
- xen-api v0.26.0
|
||||
- xo-server-sdn-controller v0.1
|
||||
- xo-server-auth-saml v0.6.0
|
||||
- xo-server-backup-reports v0.16.2
|
||||
- xo-server v5.44.0
|
||||
- xo-web v5.44.0
|
||||
|
||||
@@ -14,5 +14,5 @@

1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer
1. when you want a review, add a reviewer (and only one)
1. if necessary, update your PR, and re- add a reviewer
@@ -1,4 +1,4 @@
# Xen Orchestra [](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [](https://travis-ci.org/vatesfr/xen-orchestra)
# Xen Orchestra [](https://travis-ci.org/vatesfr/xen-orchestra)


|
||||
|
||||
|
||||
New binary files added:

- docs/assets/metadata-1.png (9.4 KiB)
- docs/assets/metadata-2.png (71 KiB)
- docs/assets/metadata-3.png (22 KiB)
- docs/assets/metadata-4.png (48 KiB)
- docs/assets/metadata-5.png (55 KiB)
- docs/assets/metadata-6.png (57 KiB)
- docs/assets/metadata-7.png (11 KiB)
@@ -1,13 +1,13 @@

# Installation

SSH to your XenServer and execute the following:
SSH to your XenServer/XCP-ng host and execute the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```

This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself, it's 100% safe.
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.

## [More on XOA](xoa.md)

@@ -1,6 +1,6 @@
|
||||
# Metadata backup
|
||||
|
||||
> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
|
||||
> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.
|
||||
|
||||
## Introduction
|
||||
|
||||
@@ -11,21 +11,38 @@ In Xen Orchestra, Metadata backup is divided into two different options:
|
||||
* Pool metadata backup
|
||||
* XO configuration backup
|
||||
|
||||
### How to use metadata backup
|
||||
### Performing a backup
|
||||
|
||||
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
|
||||

|
||||
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:
|
||||

|
||||
|
||||
When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
|
||||
|
||||

|
||||

|
||||
|
||||
Define the name and retention for the job.
|
||||
|
||||

|
||||

|
||||
|
||||
Once created, the job is displayed with the other classic jobs.
|
||||
|
||||

|
||||

|
||||
|
||||
> Restore for metadata backup jobs should be available in XO 5.33
|
||||
|
||||
### Performing a restore
|
||||
|
||||
> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (eg replacing a host with new hardware).
|
||||
|
||||
If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:
|
||||
|
||||

|
||||
|
||||
If you click this button, it will show you Metadata backups available for restore:
|
||||
|
||||

|
||||
|
||||
You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:
|
||||
|
||||

|
||||
|
||||
That's it!
|
||||
@@ -1,24 +1,33 @@
# Support

You can access our pro support if you subscribe to any of these plans:
Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).

XOA is available in multiple plans:

* Free
* Starter
* Enterprise
* Premium

The better the plan, the faster the support will be with higher priority.
Higher tier support plans include faster ticket response times (and cover more features). Paid support plans and response times are based on the plan you have, plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).

## XOA Free support

With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you, however, this support is limited and is not guaranteed in regards to response times or resolutions offered.

## Community support

If you are using Xen Orchestra via the sources, you can ask questions and try to recieve help two different ways:
If you are using Xen Orchestra via the source and not XOA, you can ask questions and try to recieve help through a number of different ways:

* In our [forum](https://xen-orchestra.com/forum/)
* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
* In our IRC - `#xen-orchestra` on `Freenode`

However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
We encourage you to give back to the community by assisting other users via these two avenues as well.

If you are using Xen Orchestra in production, please subscribe to a plan.
Lastly while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe for one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).

> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.

## Open a ticket

If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
@@ -10,15 +10,16 @@
|
||||
"eslint-config-prettier": "^4.1.0",
|
||||
"eslint-config-standard": "12.0.0",
|
||||
"eslint-config-standard-jsx": "^6.0.2",
|
||||
"eslint-plugin-eslint-comments": "^3.1.1",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
"eslint-plugin-node": "^8.0.0",
|
||||
"eslint-plugin-node": "^9.0.1",
|
||||
"eslint-plugin-promise": "^4.0.0",
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.95.1",
|
||||
"flow-bin": "^0.100.0",
|
||||
"globby": "^9.0.0",
|
||||
"husky": "^1.2.1",
|
||||
"husky": "^2.2.0",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "complex-matcher",
|
||||
"version": "0.5.0",
|
||||
"version": "0.6.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
@@ -44,6 +44,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -599,6 +599,13 @@ export const parse = parser.parse.bind(parser)
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const _extractStringFromRegexp = child => {
|
||||
const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
|
||||
if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
|
||||
return unescapedRegexp
|
||||
}
|
||||
}
|
||||
|
||||
const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof Or) {
|
||||
const strings = []
|
||||
@@ -606,6 +613,12 @@ const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof StringNode) {
|
||||
strings.push(child.value)
|
||||
}
|
||||
if (child instanceof RegExpNode) {
|
||||
const unescapedRegexp = _extractStringFromRegexp(child)
|
||||
if (unescapedRegexp !== undefined) {
|
||||
strings.push(unescapedRegexp)
|
||||
}
|
||||
}
|
||||
})
|
||||
return strings
|
||||
}
|
||||
@@ -613,6 +626,12 @@ const _getPropertyClauseStrings = ({ child }) => {
|
||||
if (child instanceof StringNode) {
|
||||
return [child.value]
|
||||
}
|
||||
if (child instanceof RegExpNode) {
|
||||
const unescapedRegexp = _extractStringFromRegexp(child)
|
||||
if (unescapedRegexp !== undefined) {
|
||||
return [unescapedRegexp]
|
||||
}
|
||||
}
|
||||
|
||||
return []
|
||||
}
|
||||
|
||||
@@ -12,10 +12,13 @@ import {
|
||||
} from './'
|
||||
|
||||
it('getPropertyClausesStrings', () => {
|
||||
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
|
||||
const tmp = getPropertyClausesStrings(
|
||||
parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
|
||||
)
|
||||
expect(tmp).toEqual({
|
||||
bar: ['baz'],
|
||||
baz: ['foo', 'bar'],
|
||||
baz: ['foo', 'bar', 'boo', 'far'],
|
||||
foo: ['bar'],
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,12 +27,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.8.0",
|
||||
"@xen-orchestra/fs": "^0.9.0",
|
||||
"cli-progress": "^2.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
"struct-fu": "^1.2.0",
|
||||
"vhd-lib": "^0.6.0"
|
||||
"vhd-lib": "^0.7.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -44,13 +44,14 @@
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"rimraf": "^2.6.1",
|
||||
"tmp": "^0.0.33"
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "vhd-lib",
|
||||
"version": "0.6.0",
|
||||
"version": "0.7.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Primitives for VHD file handling",
|
||||
"keywords": [],
|
||||
@@ -22,9 +22,9 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "3.0.0",
|
||||
"core-js": "^3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"struct-fu": "^1.2.0",
|
||||
@@ -35,16 +35,16 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.8.0",
|
||||
"@xen-orchestra/fs": "^0.9.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^1.0.0",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^4.0.0",
|
||||
"get-stream": "^5.1.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"rimraf": "^2.6.2",
|
||||
"tmp": "^0.0.33"
|
||||
"tmp": "^0.1.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
@@ -52,6 +52,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
import assert from 'assert'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
import constantStream from './_constant-stream'
|
||||
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
|
||||
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
|
||||
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
|
||||
@@ -232,24 +230,11 @@ export default class Vhd {
|
||||
// Write functions.
|
||||
// =================================================================
|
||||
|
||||
// Write a buffer/stream at a given position in a vhd file.
|
||||
// Write a buffer at a given position in a vhd file.
|
||||
async _write(data, offset) {
|
||||
debug(
|
||||
`_write offset=${offset} size=${
|
||||
Buffer.isBuffer(data) ? data.length : '???'
|
||||
}`
|
||||
)
|
||||
// TODO: could probably be merged in remote handlers.
|
||||
const stream = await this._handler.createOutputStream(this._path, {
|
||||
flags: 'r+',
|
||||
start: offset,
|
||||
})
|
||||
return Buffer.isBuffer(data)
|
||||
? new Promise((resolve, reject) => {
|
||||
stream.on('error', reject)
|
||||
stream.end(data, resolve)
|
||||
})
|
||||
: fromEvent(data.pipe(stream), 'finish')
|
||||
assert(Buffer.isBuffer(data))
|
||||
debug(`_write offset=${offset} size=${data.length}`)
|
||||
return this._handler.write(this._path, data, offset)
|
||||
}
|
||||
|
||||
async _freeFirstBlockSpace(spaceNeededBytes) {
|
||||
@@ -306,7 +291,7 @@ export default class Vhd {
|
||||
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
|
||||
)
|
||||
await this._write(
|
||||
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
|
||||
Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
|
||||
header.tableOffset + prevBat.length
|
||||
)
|
||||
await this.writeHeader()
|
||||
@@ -331,10 +316,7 @@ export default class Vhd {
|
||||
|
||||
await Promise.all([
|
||||
// Write an empty block and addr in vhd file.
|
||||
this._write(
|
||||
constantStream([0], this.fullBlockSize),
|
||||
sectorsToBytes(blockAddr)
|
||||
),
|
||||
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
|
||||
|
||||
this._setBatEntry(blockId, blockAddr),
|
||||
])
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.25.0"
|
||||
"xen-api": "^0.25.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
@@ -56,6 +56,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ console.log(xapi.pool.$master.$resident_VMs[0].name_label)
A CLI is provided to help exploration and discovery of the XAPI.

```
> xen-api https://xen1.company.net root
> xen-api xen1.company.net root
Password: ******
root@xen1.company.net> xapi.status
'connected'
@@ -92,6 +92,14 @@ root@xen1.company.net> xapi.pool.$master.name_label
'xen1'
```

You can optionally prefix the address by a protocol: `https://` (default) or `http://`.

In case of error due to invalid or self-signed certificates you can use the `--allow-unauthorized` flag (or `--au`):

```
> xen-api --au xen1.company.net root
```

To ease searches, `find()` and `findAll()` functions are available:

```
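For completeness (not part of the diff), a hedged sketch of doing the same thing programmatically with the library rather than the CLI; the exact option names (`allowUnauthorized`, the `auth` object) are assumed from the constructor options seen elsewhere in this changeset.

```js
// Hypothetical sketch: connecting with the xen-api library instead of the CLI.
const { createClient } = require('xen-api')

async function main() {
  const xapi = createClient({
    url: 'https://xen1.company.net',
    auth: { user: 'root', password: 'secret' },
    allowUnauthorized: true, // accept a self-signed certificate, like --au on the CLI
  })

  await xapi.connect()
  console.log(xapi.status) // 'connected'
  console.log(xapi.pool.$master.name_label) // e.g. 'xen1'
  await xapi.disconnect()
}

main().catch(console.error)
```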
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.25.0",
|
||||
"version": "0.25.2",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -33,6 +33,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"bind-property-descriptor": "^1.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"debug": "^4.0.1",
|
||||
"event-to-promise": "^0.8.0",
|
||||
@@ -68,6 +69,7 @@
|
||||
"plot": "gnuplot -p memory-test.gnu",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,142 +0,0 @@
|
||||
function request() {
|
||||
if (this._requested) {
|
||||
return
|
||||
}
|
||||
|
||||
this._requested = true
|
||||
|
||||
const resolve = this._resolve
|
||||
if (resolve !== undefined) {
|
||||
this._resolve = undefined
|
||||
resolve()
|
||||
}
|
||||
|
||||
const listeners = this._listeners
|
||||
if (listeners !== undefined) {
|
||||
this._listeners = undefined
|
||||
for (let i = 0, n = listeners.length; i < n; ++i) {
|
||||
listeners[i].call(this)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const INTERNAL = {}
|
||||
|
||||
function Source(signals) {
|
||||
const request_ = (this.request = request.bind(
|
||||
(this.signal = new Signal(INTERNAL))
|
||||
))
|
||||
|
||||
if (signals === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
const n = signals.length
|
||||
for (let i = 0; i < n; ++i) {
|
||||
if (signals[i].requested) {
|
||||
request_()
|
||||
return
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < n; ++i) {
|
||||
signals[i].addListener(request_)
|
||||
}
|
||||
}
|
||||
|
||||
class Subscription {
|
||||
constructor(signal, listener) {
|
||||
this._listener = listener
|
||||
this._signal = signal
|
||||
}
|
||||
|
||||
get closed() {
|
||||
return this._signal === undefined
|
||||
}
|
||||
|
||||
unsubscribe() {
|
||||
const signal = this._signal
|
||||
if (signal !== undefined) {
|
||||
const listener = this._listener
|
||||
this._listener = this._signal = undefined
|
||||
|
||||
const listeners = signal._listeners
|
||||
if (listeners !== undefined) {
|
||||
const i = listeners.indexOf(listener)
|
||||
if (i !== -1) {
|
||||
listeners.splice(i, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
const closedSubscription = new Subscription()
|
||||
|
||||
export default class Signal {
|
||||
static source(signals) {
|
||||
return new Source(signals)
|
||||
}
|
||||
|
||||
constructor(executor) {
|
||||
this._listeners = undefined
|
||||
this._promise = undefined
|
||||
this._requested = false
|
||||
this._resolve = undefined
|
||||
|
||||
if (executor !== INTERNAL) {
|
||||
executor(request.bind(this))
|
||||
}
|
||||
}
|
||||
|
||||
get description() {
|
||||
return this._description
|
||||
}
|
||||
|
||||
get requested() {
|
||||
return this._requested
|
||||
}
|
||||
|
||||
throwIfRequested() {
|
||||
if (this._requested) {
|
||||
throw new Error('this signal has been requested')
|
||||
}
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Promise like API
|
||||
// ===========================================================================
|
||||
|
||||
then(listener) {
|
||||
if (typeof listener !== 'function') {
|
||||
return this
|
||||
}
|
||||
|
||||
let promise = this._promise
|
||||
if (promise === undefined) {
|
||||
const requested = this._requested
|
||||
promise = this._promise = requested
|
||||
? Promise.resolve()
|
||||
: new Promise(resolve => {
|
||||
this._resolve = resolve
|
||||
})
|
||||
}
|
||||
return promise.then(listener)
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
// Observable like API (but not compatible)
|
||||
// ===========================================================================
|
||||
|
||||
subscribe(listener) {
|
||||
if (this._requested) {
|
||||
listener.call(this)
|
||||
return closedSubscription
|
||||
}
|
||||
const listeners = this._listeners
|
||||
if (listeners === undefined) {
|
||||
this._listeners = [listener]
|
||||
} else {
|
||||
listeners.push(listener)
|
||||
}
|
||||
return new Subscription(this, listener)
|
||||
}
|
||||
}
|
||||
@@ -9,6 +9,7 @@ import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
import { createClient } from './'
|
||||
@@ -25,6 +26,20 @@ function askPassword(prompt = 'Password: ') {
|
||||
})
|
||||
}
|
||||
|
||||
const { getPrototypeOf, ownKeys } = Reflect
|
||||
function getAllBoundDescriptors(object) {
|
||||
const descriptors = { __proto__: null }
|
||||
let current = object
|
||||
do {
|
||||
ownKeys(current).forEach(key => {
|
||||
if (!(key in descriptors)) {
|
||||
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
|
||||
}
|
||||
})
|
||||
} while ((current = getPrototypeOf(current)) !== null)
|
||||
return descriptors
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
|
||||
@@ -78,11 +93,17 @@ const main = async args => {
|
||||
const repl = createRepl({
|
||||
prompt: `${xapi._humanId}> `,
|
||||
})
|
||||
repl.context.xapi = xapi
|
||||
|
||||
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
repl.context.find = predicate => find(xapi.objects.all, predicate)
|
||||
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
{
|
||||
const ctx = repl.context
|
||||
ctx.xapi = xapi
|
||||
|
||||
ctx.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
ctx.find = predicate => find(xapi.objects.all, predicate)
|
||||
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
|
||||
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
|
||||
}
|
||||
|
||||
// Make the REPL waits for promise completion.
|
||||
repl.eval = (evaluate => (cmd, context, filename, cb) => {
|
||||
|
||||
@@ -25,7 +25,6 @@ import isReadOnlyCall from './_isReadOnlyCall'
|
||||
import makeCallSetting from './_makeCallSetting'
|
||||
import parseUrl from './_parseUrl'
|
||||
import replaceSensitiveValues from './_replaceSensitiveValues'
|
||||
import Signal from './_Signal'
|
||||
import XapiError from './_XapiError'
|
||||
|
||||
// ===================================================================
|
||||
@@ -35,7 +34,7 @@ const EVENT_TIMEOUT = 60
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const { defineProperties, freeze, keys: getKeys } = Object
|
||||
const { defineProperties, defineProperty, freeze, keys: getKeys } = Object
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
@@ -93,15 +92,19 @@ export class Xapi extends EventEmitter {
|
||||
this._allowUnauthorized = opts.allowUnauthorized
|
||||
this._setUrl(url)
|
||||
|
||||
this._connected = Signal.source()
|
||||
this._disconnected = Signal.source()
|
||||
this._connected = new Promise(resolve => {
|
||||
this._resolveConnected = resolve
|
||||
})
|
||||
this._disconnected = Promise.resolve()
|
||||
this._sessionId = undefined
|
||||
this._status = DISCONNECTED
|
||||
|
||||
this._debounce = opts.debounce ?? 200
|
||||
this._objects = new Collection()
|
||||
this._objectsByRef = { __proto__: null }
|
||||
this._objectsFetched = Signal.source()
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
this._eventWatchers = { __proto__: null }
|
||||
this._taskWatchers = { __proto__: null }
|
||||
this._watchedTypes = undefined
|
||||
@@ -127,11 +130,11 @@ export class Xapi extends EventEmitter {
|
||||
// ===========================================================================
|
||||
|
||||
get connected() {
|
||||
return this._connected.signal
|
||||
return this._connected
|
||||
}
|
||||
|
||||
get disconnected() {
|
||||
return this._disconnected.signal
|
||||
return this._disconnected
|
||||
}
|
||||
|
||||
get pool() {
|
||||
@@ -158,30 +161,17 @@ export class Xapi extends EventEmitter {
|
||||
assert(status === DISCONNECTED)
|
||||
|
||||
this._status = CONNECTING
|
||||
this._disconnected = Signal.source()
|
||||
this._disconnected = new Promise(resolve => {
|
||||
this._resolveDisconnected = resolve
|
||||
})
|
||||
|
||||
try {
|
||||
await this._sessionOpen()
|
||||
|
||||
// Uses introspection to list available types.
|
||||
const types = (this._types = (await this._interruptOnDisconnect(
|
||||
this._call('system.listMethods')
|
||||
))
|
||||
.filter(isGetAllRecordsMethod)
|
||||
.map(method => method.slice(0, method.indexOf('.'))))
|
||||
this._lcToTypes = { __proto__: null }
|
||||
types.forEach(type => {
|
||||
const lcType = type.toLowerCase()
|
||||
if (lcType !== type) {
|
||||
this._lcToTypes[lcType] = type
|
||||
}
|
||||
})
|
||||
|
||||
this._pool = (await this.getAllRecords('pool'))[0]
|
||||
|
||||
debug('%s: connected', this._humanId)
|
||||
this._status = CONNECTED
|
||||
this._connected.request()
|
||||
this._resolveConnected()
|
||||
this._resolveConnected = undefined
|
||||
this.emit(CONNECTED)
|
||||
} catch (error) {
|
||||
ignoreErrors.call(this.disconnect())
|
||||
@@ -198,7 +188,9 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
if (status === CONNECTED) {
|
||||
this._connected = Signal.source()
|
||||
this._connected = new Promise(resolve => {
|
||||
this._resolveConnected = resolve
|
||||
})
|
||||
} else {
|
||||
assert(status === CONNECTING)
|
||||
}
|
||||
@@ -212,7 +204,8 @@ export class Xapi extends EventEmitter {
|
||||
debug('%s: disconnected', this._humanId)
|
||||
|
||||
this._status = DISCONNECTED
|
||||
this._disconnected.request()
|
||||
this._resolveDisconnected()
|
||||
this._resolveDisconnected = undefined
|
||||
this.emit(DISCONNECTED)
|
||||
}
|
||||
|
||||
@@ -663,41 +656,47 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
_interruptOnDisconnect(promise) {
|
||||
let subscription
|
||||
let listener
|
||||
const pWrapper = new Promise((resolve, reject) => {
|
||||
subscription = this._disconnected.signal.subscribe(() => {
|
||||
reject(new Error('disconnected'))
|
||||
})
|
||||
promise.then(resolve, reject)
|
||||
this.on(
|
||||
DISCONNECTED,
|
||||
(listener = () => {
|
||||
reject(new Error('disconnected'))
|
||||
})
|
||||
)
|
||||
})
|
||||
const clean = () => {
|
||||
subscription.unsubscribe()
|
||||
this.removeListener(DISCONNECTED, listener)
|
||||
}
|
||||
pWrapper.then(clean, clean)
|
||||
return pWrapper
|
||||
}
|
||||
|
||||
async _sessionCall(method, args, timeout) {
|
||||
_sessionCallRetryOptions = {
|
||||
tries: 2,
|
||||
when: error =>
|
||||
this._status !== DISCONNECTED && error?.code === 'SESSION_INVALID',
|
||||
onRetry: () => this._sessionOpen(),
|
||||
}
|
||||
_sessionCall(method, args, timeout) {
|
||||
if (method.startsWith('session.')) {
|
||||
throw new Error('session.*() methods are disabled from this interface')
|
||||
return Promise.reject(
|
||||
new Error('session.*() methods are disabled from this interface')
|
||||
)
|
||||
}
|
||||
|
||||
const sessionId = this._sessionId
|
||||
assert.notStrictEqual(sessionId, undefined)
|
||||
return pRetry(() => {
|
||||
const sessionId = this._sessionId
|
||||
assert.notStrictEqual(sessionId, undefined)
|
||||
|
||||
const newArgs = [sessionId]
|
||||
if (args !== undefined) {
|
||||
newArgs.push.apply(newArgs, args)
|
||||
}
|
||||
|
||||
return pRetry(
|
||||
() => this._interruptOnDisconnect(this._call(method, newArgs, timeout)),
|
||||
{
|
||||
tries: 2,
|
||||
when: { code: 'SESSION_INVALID' },
|
||||
onRetry: () => this._sessionOpen(),
|
||||
const newArgs = [sessionId]
|
||||
if (args !== undefined) {
|
||||
newArgs.push.apply(newArgs, args)
|
||||
}
|
||||
)
|
||||
|
||||
return this._call(method, newArgs, timeout)
|
||||
}, this._sessionCallRetryOptions)
|
||||
}
|
||||
|
||||
// FIXME: (probably rare) race condition leading to unnecessary login when:
|
||||
@@ -724,6 +723,28 @@ export class Xapi extends EventEmitter {
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const oldPoolRef = this._pool?.$ref
|
||||
this._pool = (await this.getAllRecords('pool'))[0]
|
||||
|
||||
// if the pool ref has changed, it means that the XAPI has been restarted or
|
||||
// it's not the same XAPI, we need to refetch the available types and reset
|
||||
// the event loop in that case
|
||||
if (this._pool.$ref !== oldPoolRef) {
|
||||
// Uses introspection to list available types.
|
||||
const types = (this._types = (await this._interruptOnDisconnect(
|
||||
this._call('system.listMethods')
|
||||
))
|
||||
.filter(isGetAllRecordsMethod)
|
||||
.map(method => method.slice(0, method.indexOf('.'))))
|
||||
this._lcToTypes = { __proto__: null }
|
||||
types.forEach(type => {
|
||||
const lcType = type.toLowerCase()
|
||||
if (lcType !== type) {
|
||||
this._lcToTypes[lcType] = type
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
_setUrl(url) {
|
||||
@@ -878,8 +899,10 @@ export class Xapi extends EventEmitter {
|
||||
async _watchEvents() {
|
||||
// eslint-disable-next-line no-labels
|
||||
mainLoop: while (true) {
|
||||
if (this._objectsFetched.signal.requested) {
|
||||
this._objectsFetched = Signal.source()
|
||||
if (this._resolveObjectsFetched === undefined) {
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
}
|
||||
|
||||
await this._connected
|
||||
@@ -907,7 +930,8 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
// initial fetch
|
||||
await this._refreshCachedRecords(types)
|
||||
this._objectsFetched.request()
|
||||
this._resolveObjectsFetched()
|
||||
this._resolveObjectsFetched = undefined
|
||||
|
||||
// event loop
|
||||
const debounce = this._debounce
|
||||
@@ -918,9 +942,12 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
let result
|
||||
try {
|
||||
result = await this._sessionCall(
|
||||
// don't use _sessionCall because a session failure should break the
|
||||
// loop and trigger a complete refetch
|
||||
result = await this._call(
|
||||
'event.from',
|
||||
[
|
||||
this._sessionId,
|
||||
types,
|
||||
fromToken,
|
||||
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
|
||||
@@ -928,7 +955,8 @@ export class Xapi extends EventEmitter {
|
||||
EVENT_TIMEOUT * 1e3 * 1.1
|
||||
)
|
||||
} catch (error) {
|
||||
if (error?.code === 'EVENTS_LOST') {
|
||||
const code = error?.code
|
||||
if (code === 'EVENTS_LOST' || code === 'SESSION_INVALID') {
|
||||
// eslint-disable-next-line no-labels
|
||||
continue mainLoop
|
||||
}
|
||||
@@ -954,8 +982,10 @@ export class Xapi extends EventEmitter {
|
||||
//
|
||||
// It also has to manually get all objects first.
|
||||
async _watchEventsLegacy() {
|
||||
if (this._objectsFetched.signal.requested) {
|
||||
this._objectsFetched = Signal.source()
|
||||
if (this._resolveObjectsFetched === undefined) {
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
}
|
||||
|
||||
await this._connected
|
||||
@@ -964,7 +994,8 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
// initial fetch
|
||||
await this._refreshCachedRecords(types)
|
||||
this._objectsFetched.request()
|
||||
this._resolveObjectsFetched()
|
||||
this._resolveObjectsFetched = undefined
|
||||
|
||||
await this._sessionCall('event.register', [types])
|
||||
|
||||
@@ -1002,17 +1033,23 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
const getObjectByRef = ref => this._objectsByRef[ref]
|
||||
|
||||
Record = function(ref, data) {
|
||||
defineProperties(this, {
|
||||
$id: { value: data.uuid ?? ref },
|
||||
$ref: { value: ref },
|
||||
$xapi: { value: xapi },
|
||||
})
|
||||
for (let i = 0; i < nFields; ++i) {
|
||||
const field = fields[i]
|
||||
this[field] = data[field]
|
||||
Record = defineProperty(
|
||||
function(ref, data) {
|
||||
defineProperties(this, {
|
||||
$id: { value: data.uuid ?? ref },
|
||||
$ref: { value: ref },
|
||||
$xapi: { value: xapi },
|
||||
})
|
||||
for (let i = 0; i < nFields; ++i) {
|
||||
const field = fields[i]
|
||||
this[field] = data[field]
|
||||
}
|
||||
},
|
||||
'name',
|
||||
{
|
||||
value: type,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
const getters = { $pool: getPool }
|
||||
const props = { $type: type }
|
||||
@@ -1032,9 +1069,14 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
}
|
||||
|
||||
props[`add_to_${field}`] = function(...values) {
|
||||
props[`add_${field}`] = function(value) {
|
||||
return xapi
|
||||
.call(`${type}.add_${field}`, this.$ref, values)
|
||||
.call(`${type}.add_${field}`, this.$ref, value)
|
||||
.then(noop)
|
||||
}
|
||||
props[`remove_${field}`] = function(value) {
|
||||
return xapi
|
||||
.call(`${type}.remove_${field}`, this.$ref, value)
|
||||
.then(noop)
|
||||
}
|
||||
} else if (value !== null && typeof value === 'object') {
|
||||
|
||||
@@ -25,5 +25,8 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"xo-common": "^0.2.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,6 +64,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
"xo-lib": "^0.9.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^11.11.4",
|
||||
"@types/node": "^12.0.2",
|
||||
"@types/through2": "^2.0.31",
|
||||
"tslint": "^5.9.1",
|
||||
"tslint-config-standard": "^8.0.1",
|
||||
@@ -55,6 +55,7 @@
|
||||
"lint": "tslint 'src/*.ts'",
|
||||
"posttest": "yarn run lint",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"start": "node dist/index.js"
|
||||
"start": "node dist/index.js",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"jsonrpc-websocket-client": "^0.4.1",
|
||||
"jsonrpc-websocket-client": "^0.5.0",
|
||||
"lodash": "^4.17.2",
|
||||
"make-error": "^1.0.4"
|
||||
},
|
||||
@@ -49,6 +49,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,5 +41,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-auth-ldap",
|
||||
"version": "0.6.4",
|
||||
"version": "0.6.5",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "LDAP authentication plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -55,5 +55,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -234,6 +234,7 @@ class AuthLdap {
|
||||
entry.objectName
|
||||
} => ${username} authenticated`
|
||||
)
|
||||
logger(JSON.stringify(entry, null, 2))
|
||||
return { username }
|
||||
} catch (error) {
|
||||
logger(`failed to bind as ${entry.objectName}: ${error.message}`)
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"passport-saml": "^1.0.0"
|
||||
"passport-saml": "^1.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -24,7 +24,10 @@ export const configurationSchema = {
|
||||
},
|
||||
usernameField: {
|
||||
title: 'Username field',
|
||||
description: 'Field to use as the XO username',
|
||||
description: `Field to use as the XO username
|
||||
|
||||
You should try \`http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress\` if you are using Microsoft Azure Active Directory.
|
||||
`,
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.15.0",
|
||||
"version": "0.16.1",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,6 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -43,6 +44,8 @@
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
@@ -55,5 +58,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, get, startCase } from 'lodash'
|
||||
import { forEach, groupBy, startCase } from 'lodash'
|
||||
import pkg from '../package'
|
||||
|
||||
const logger = createLogger('xo:xo-server-backup-reports')
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
|
||||
@@ -46,6 +49,9 @@ export const testSchema = {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const INDENT = ' '
|
||||
const UNKNOWN_ITEM = 'Unknown'
|
||||
|
||||
const ICON_FAILURE = '🚨'
|
||||
const ICON_INTERRUPTED = '⚠️'
|
||||
const ICON_SKIPPED = '⏩'
|
||||
@@ -60,7 +66,7 @@ const STATUS_ICON = {
|
||||
}
|
||||
|
||||
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
|
||||
const createDateFormater = timezone =>
|
||||
const createDateFormatter = timezone =>
|
||||
timezone !== undefined
|
||||
? timestamp =>
|
||||
moment(timestamp)
|
||||
@@ -86,10 +92,6 @@ const formatSpeed = (bytes, milliseconds) =>
|
||||
})
|
||||
: 'N/A'
|
||||
|
||||
const logError = e => {
|
||||
console.error('backup report error:', e)
|
||||
}
|
||||
|
||||
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
|
||||
const NO_SUCH_OBJECT_ERROR = 'no such object'
|
||||
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
|
||||
@@ -100,40 +102,116 @@ const isSkippedError = error =>
|
||||
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
|
||||
error.message === NO_SUCH_OBJECT_ERROR
|
||||
|
||||
const INDENT = ' '
|
||||
const createGetTemporalDataMarkdown = formatDate => (
|
||||
start,
|
||||
end,
|
||||
nbIndent = 0
|
||||
) => {
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
// ===================================================================
|
||||
|
||||
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
|
||||
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
|
||||
const TITLE_BY_STATUS = {
|
||||
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
|
||||
interrupted: n => `## ${n} Interrupted`,
|
||||
skipped: n => `## ${n} Skipped`,
|
||||
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
|
||||
}
|
||||
|
||||
const getTemporalDataMarkdown = (end, start, formatDate) => {
|
||||
const markdown = [`- **Start time**: ${formatDate(start)}`]
|
||||
if (end !== undefined) {
|
||||
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
|
||||
markdown.push(`- **End time**: ${formatDate(end)}`)
|
||||
const duration = end - start
|
||||
if (duration >= 1) {
|
||||
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
|
||||
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
|
||||
}
|
||||
}
|
||||
return markdown
|
||||
}
|
||||
|
||||
const addWarnings = (text, warnings, nbIndent = 0) => {
|
||||
if (warnings === undefined) {
|
||||
const getWarningsMarkdown = (warnings = []) =>
|
||||
warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
|
||||
|
||||
const getErrorMarkdown = task => {
|
||||
let message
|
||||
if (
|
||||
task.status === 'success' ||
|
||||
(message = task.result?.message ?? task.result?.code) === undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
warnings.forEach(({ message }) => {
|
||||
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
|
||||
})
|
||||
const label = task.status === 'skipped' ? 'Reason' : 'Error'
|
||||
return `- **${label}**: ${message}`
|
||||
}
|
||||
|
||||
const MARKDOWN_BY_TYPE = {
|
||||
pool(task, { formatDate }) {
|
||||
const { id, pool = {}, poolMaster = {} } = task.data
|
||||
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
|
||||
|
||||
return {
|
||||
body: [
|
||||
pool.uuid !== undefined
|
||||
? `- **UUID**: ${pool.uuid}`
|
||||
: `- **ID**: ${id}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[pool] ${name}`,
|
||||
}
|
||||
},
|
||||
xo(task, { formatDate, jobName }) {
|
||||
return {
|
||||
body: [
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[XO] ${jobName}`,
|
||||
}
|
||||
},
|
||||
async remote(task, { formatDate, xo }) {
|
||||
const id = task.data.id
|
||||
const name = await xo.getRemote(id).then(
|
||||
({ name }) => name,
|
||||
error => {
|
||||
logger.warn(error)
|
||||
return UNKNOWN_ITEM
|
||||
}
|
||||
)
|
||||
return {
|
||||
body: [
|
||||
`- **ID**: ${id}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[remote] ${name}`,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const getMarkdown = (task, props) =>
|
||||
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
|
||||
|
||||
const toMarkdown = parts => {
|
||||
const lines = []
|
||||
let indentLevel = 0
|
||||
|
||||
const helper = part => {
|
||||
if (typeof part === 'string') {
|
||||
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
|
||||
} else if (Array.isArray(part)) {
|
||||
++indentLevel
|
||||
part.forEach(helper)
|
||||
--indentLevel
|
||||
}
|
||||
}
|
||||
helper(parts)
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class BackupReportsXoPlugin {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
this._report = this._wrapper.bind(this)
|
||||
this._report = this._report.bind(this)
|
||||
}
|
||||
|
||||
configure({ toMails, toXmpp }) {
|
||||
@@ -146,72 +224,174 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
test({ runId }) {
|
||||
return this._backupNgListener(undefined, undefined, undefined, runId)
|
||||
return this._report(runId, undefined, true)
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._xo.removeListener('job:terminated', this._report)
|
||||
}
|
||||
|
||||
_wrapper(status, job, schedule, runJobId) {
|
||||
if (job.type === 'metadataBackup') {
|
||||
return
|
||||
}
|
||||
async _report(runJobId, { type, status } = {}, force) {
|
||||
const xo = this._xo
|
||||
try {
|
||||
if (type === 'call') {
|
||||
return this._legacyVmHandler(status)
|
||||
}
|
||||
|
||||
return new Promise(resolve =>
|
||||
resolve(
|
||||
job.type === 'backup'
|
||||
? this._backupNgListener(status, job, schedule, runJobId)
|
||||
: this._listener(status, job, schedule, runJobId)
|
||||
)
|
||||
).catch(logError)
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
}
|
||||
|
||||
const reportWhen = log.data.reportWhen
|
||||
if (
|
||||
!force &&
|
||||
(reportWhen === 'never' ||
|
||||
// Handle improper value introduced by:
|
||||
// https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
|
||||
reportWhen === 'Never' ||
|
||||
(reportWhen === 'failure' && log.status === 'success'))
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const [job, schedule] = await Promise.all([
|
||||
await xo.getJob(log.jobId),
|
||||
await xo.getSchedule(log.scheduleId).catch(error => {
|
||||
logger.warn(error)
|
||||
}),
|
||||
])
|
||||
|
||||
if (job.type === 'backup') {
|
||||
return this._ngVmHandler(log, job, schedule, force)
|
||||
} else if (job.type === 'metadataBackup') {
|
||||
return this._metadataHandler(log, job, schedule, force)
|
||||
}
|
||||
|
||||
throw new Error(`Unknown backup job type: ${job.type}`)
|
||||
} catch (error) {
|
||||
logger.warn(error)
|
||||
}
|
||||
}
|
||||
|
||||
async _backupNgListener(_1, _2, schedule, runJobId) {
|
||||
async _metadataHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const tasksByStatus = groupBy(log.tasks, 'status')
|
||||
const n = log.tasks?.length ?? 0
|
||||
const nSuccesses = tasksByStatus.success?.length ?? 0
|
||||
|
||||
if (!force && log.data.reportWhen === 'failure') {
|
||||
delete tasksByStatus.success
|
||||
}
|
||||
|
||||
// header
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Job name**: ${jobName}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
getErrorMarkdown(log),
|
||||
]
|
||||
|
||||
const nagiosText = []
|
||||
|
||||
// body
|
||||
for (const status of STATUS) {
|
||||
const tasks = tasksByStatus[status]
|
||||
if (tasks === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
// tasks header
|
||||
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
|
||||
|
||||
// tasks body
|
||||
for (const task of tasks) {
|
||||
const taskMarkdown = await getMarkdown(task, {
|
||||
formatDate,
|
||||
jobName: log.jobName,
|
||||
})
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const { title, body } = taskMarkdown
|
||||
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
|
||||
|
||||
if (task.status !== 'success') {
|
||||
nagiosText.push(`[${task.status}] ${title}`)
|
||||
}
|
||||
|
||||
for (const subTask of task.tasks ?? []) {
|
||||
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTask.status]
|
||||
const { title, body } = taskMarkdown
|
||||
subMarkdown.push([
|
||||
`- **${title}** ${icon}`,
|
||||
[...body, ...getWarningsMarkdown(subTask.warnings)],
|
||||
])
|
||||
}
|
||||
markdown.push('', '', `### ${title}`, ...subMarkdown)
|
||||
}
|
||||
}
|
||||
|
||||
// footer
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${log.status} − Metadata backup report for ${
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${
|
||||
log.jobName
|
||||
}`
|
||||
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
|
||||
log.jobName
|
||||
} - ${nagiosText.join(' ')}`,
|
||||
})
|
||||
}
|
||||
|
||||
async _ngVmHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
|
||||
const { reportWhen, mode } = log.data || {}
|
||||
if (
|
||||
reportWhen === 'never' ||
|
||||
(log.status === 'success' && reportWhen === 'failure')
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
if (schedule === undefined) {
|
||||
schedule = await xo.getSchedule(log.scheduleId)
|
||||
}
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const jobName = (await xo.getJob(log.jobId, 'backup')).name
|
||||
const formatDate = createDateFormater(schedule.timezone)
|
||||
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
|
||||
|
||||
if (
|
||||
(log.status === 'failure' || log.status === 'skipped') &&
|
||||
log.result !== undefined
|
||||
) {
|
||||
let markdown = [
|
||||
if (log.tasks === undefined) {
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
`- **Error**: ${log.result.message}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
getErrorMarkdown(log),
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'---',
|
||||
'',
|
||||
`*${pkg.name} v${pkg.version}*`,
|
||||
]
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${
|
||||
log.status
|
||||
@@ -231,7 +411,7 @@ class BackupReportsXoPlugin {
|
||||
let nSkipped = 0
|
||||
let nInterrupted = 0
|
||||
for (const taskLog of log.tasks) {
|
||||
if (taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -244,16 +424,16 @@ class BackupReportsXoPlugin {
|
||||
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
|
||||
'',
|
||||
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
|
||||
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
|
||||
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
|
||||
...getWarningsMarkdown(taskLog.warnings),
|
||||
]
|
||||
addWarnings(text, taskLog.warnings)
|
||||
|
||||
const failedSubTasks = []
|
||||
const snapshotText = []
|
||||
const srsText = []
|
||||
const remotesText = []
|
||||
|
||||
for (const subTaskLog of taskLog.tasks || []) {
|
||||
for (const subTaskLog of taskLog.tasks ?? []) {
|
||||
if (
|
||||
subTaskLog.message !== 'export' &&
|
||||
subTaskLog.message !== 'snapshot'
|
||||
@@ -262,29 +442,36 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTaskLog.status]
|
||||
const errorMessage = ` - **Error**: ${get(
|
||||
subTaskLog.result,
|
||||
'message'
|
||||
)}`
|
||||
const type = subTaskLog.data?.type
|
||||
const errorMarkdown = getErrorMarkdown(subTaskLog)
|
||||
|
||||
if (subTaskLog.message === 'snapshot') {
|
||||
snapshotText.push(
|
||||
`- **Snapshot** ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
|
||||
)
|
||||
} else if (subTaskLog.data.type === 'remote') {
|
||||
snapshotText.push(`- **Snapshot** ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
])
|
||||
} else if (type === 'remote') {
|
||||
const id = subTaskLog.data.id
|
||||
const remote = await xo.getRemote(id).catch(() => {})
|
||||
remotesText.push(
|
||||
` - **${
|
||||
remote !== undefined ? remote.name : `Remote Not found`
|
||||
}** (${id}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(remotesText, subTaskLog.warnings, 2)
|
||||
const remote = await xo.getRemote(id).catch(error => {
|
||||
logger.warn(error)
|
||||
})
|
||||
const title = remote !== undefined ? remote.name : `Remote Not found`
|
||||
|
||||
remotesText.push(`- **${title}** (${id}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(remote !== undefined ? remote.name : id)
|
||||
remotesText.push('', errorMessage)
|
||||
}
|
||||
} else {
|
||||
const id = subTaskLog.data.id
|
||||
@@ -294,14 +481,17 @@ class BackupReportsXoPlugin {
|
||||
} catch (e) {}
|
||||
const [srName, srUuid] =
|
||||
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
|
||||
srsText.push(
|
||||
` - **${srName}** (${srUuid}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(srsText, subTaskLog.warnings, 2)
|
||||
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
|
||||
srsText.push('', errorMessage)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -313,53 +503,48 @@ class BackupReportsXoPlugin {
|
||||
return
|
||||
}
|
||||
|
||||
const operationInfoText = []
|
||||
addWarnings(operationInfoText, operationLog.warnings, 3)
|
||||
if (operationLog.status === 'success') {
|
||||
const size = operationLog.result.size
|
||||
const size = operationLog.result?.size
|
||||
if (size > 0) {
|
||||
if (operationLog.message === 'merge') {
|
||||
globalMergeSize += size
|
||||
} else {
|
||||
globalTransferSize += size
|
||||
}
|
||||
}
|
||||
|
||||
operationInfoText.push(
|
||||
` - **Size**: ${formatSize(size)}`,
|
||||
` - **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`
|
||||
)
|
||||
} else if (get(operationLog.result, 'message') !== undefined) {
|
||||
operationInfoText.push(
|
||||
` - **Error**: ${get(operationLog.result, 'message')}`
|
||||
)
|
||||
}
|
||||
const operationText = [
|
||||
` - **${operationLog.message}** ${
|
||||
STATUS_ICON[operationLog.status]
|
||||
}`,
|
||||
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
|
||||
...operationInfoText,
|
||||
].join('\n')
|
||||
if (get(subTaskLog, 'data.type') === 'remote') {
|
||||
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
|
||||
[
|
||||
...getTemporalDataMarkdown(
|
||||
operationLog.end,
|
||||
operationLog.start,
|
||||
formatDate
|
||||
),
|
||||
size > 0 && `- **Size**: ${formatSize(size)}`,
|
||||
size > 0 &&
|
||||
`- **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`,
|
||||
...getWarningsMarkdown(operationLog.warnings),
|
||||
getErrorMarkdown(operationLog),
|
||||
],
|
||||
]
|
||||
if (type === 'remote') {
|
||||
remotesText.push(operationText)
|
||||
remotesText.join('\n')
|
||||
}
|
||||
if (get(subTaskLog, 'data.type') === 'SR') {
|
||||
} else if (type === 'SR') {
|
||||
srsText.push(operationText)
|
||||
srsText.join('\n')
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (srsText.length !== 0) {
|
||||
srsText.unshift(`- **SRs**`)
|
||||
}
|
||||
if (remotesText.length !== 0) {
|
||||
remotesText.unshift(`- **Remotes**`)
|
||||
}
|
||||
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
|
||||
const subText = [
|
||||
...snapshotText,
|
||||
srsText.length !== 0 && `- **SRs**`,
|
||||
srsText,
|
||||
remotesText.length !== 0 && `- **Remotes**`,
|
||||
remotesText,
|
||||
]
|
||||
if (taskLog.result !== undefined) {
|
||||
if (taskLog.status === 'skipped') {
|
||||
++nSkipped
|
||||
@@ -369,8 +554,7 @@ class BackupReportsXoPlugin {
|
||||
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
|
||||
? UNHEALTHY_VDI_CHAIN_MESSAGE
|
||||
: taskLog.result.message
|
||||
}`,
|
||||
''
|
||||
}`
|
||||
)
|
||||
nagiosText.push(
|
||||
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -379,11 +563,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else {
|
||||
++nFailures
|
||||
failedVmsText.push(
|
||||
...text,
|
||||
`- **Error**: ${taskLog.result.message}`,
|
||||
''
|
||||
)
|
||||
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
|
||||
|
||||
nagiosText.push(
|
||||
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -394,7 +574,7 @@ class BackupReportsXoPlugin {
|
||||
} else {
|
||||
if (taskLog.status === 'failure') {
|
||||
++nFailures
|
||||
failedVmsText.push(...text, '', '', ...subText, '')
|
||||
failedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
@@ -402,37 +582,34 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else if (taskLog.status === 'interrupted') {
|
||||
++nInterrupted
|
||||
interruptedVmsText.push(...text, '', '', ...subText, '')
|
||||
interruptedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
|
||||
)
|
||||
} else {
|
||||
successfulVmsText.push(...text, '', '', ...subText, '')
|
||||
successfulVmsText.push(...text, ...subText)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nVms = log.tasks.length
|
||||
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
|
||||
let markdown = [
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
`- **Successes**: ${nSuccesses} / ${nVms}`,
|
||||
globalTransferSize !== 0 &&
|
||||
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
|
||||
globalMergeSize !== 0 &&
|
||||
`- **Merge size**: ${formatSize(globalMergeSize)}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'',
|
||||
]
|
||||
|
||||
if (globalTransferSize !== 0) {
|
||||
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
|
||||
}
|
||||
if (globalMergeSize !== 0) {
|
||||
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
|
||||
}
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('')
|
||||
|
||||
if (nFailures !== 0) {
|
||||
markdown.push(
|
||||
'---',
|
||||
@@ -457,7 +634,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
}
|
||||
|
||||
if (nSuccesses !== 0 && reportWhen !== 'failure') {
|
||||
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
|
||||
markdown.push(
|
||||
'---',
|
||||
'',
|
||||
@@ -468,9 +645,8 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
@@ -510,9 +686,9 @@ class BackupReportsXoPlugin {
|
||||
])
|
||||
}
|
||||
|
||||
_listener(status) {
|
||||
_legacyVmHandler(status) {
|
||||
const { calls, timezone, error } = status
|
||||
const formatDate = createDateFormater(timezone)
|
||||
const formatDate = createDateFormatter(timezone)
|
||||
|
||||
if (status.error !== undefined) {
|
||||
const [globalStatus, icon] =
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"http-request-plus": "^0.8.0",
|
||||
"jsonrpc-websocket-client": "^0.4.1"
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -44,5 +44,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -42,5 +42,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
3
packages/xo-server-sdn-controller/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
43
packages/xo-server-sdn-controller/README.md
Normal file
@@ -0,0 +1,43 @@
# xo-server-sdn-controller [](https://travis-ci.org/vatesfr/xen-orchestra)

XO Server plugin that allows the creation of pool-wide private networks.

## Install

For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage

### Network creation

In the network creation view, select a `pool` and `Private network`, then create the network.

A choice is offered between `GRE` and `VxLAN`; if `VxLAN` is chosen, port 4789 must be open for UDP traffic.
The following line needs to be added, if not already present, to `/etc/sysconfig/iptables` on all the hosts where `VxLAN` is wanted:
`-A xapi-INPUT -p udp -m conntrack --ctstate NEW -m udp --dport 4789 -j ACCEPT`
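
The plugin also registers an XO API method, `plugin.SDNController.createPrivateNetwork` (see the plugin's `load()` hook later in this diff), so the same operation can be scripted instead of going through the UI. A minimal sketch, assuming an `xo-lib` client; the URL, credentials and pool UUID are placeholders:

```js
// Minimal sketch — URL, credentials and pool UUID are placeholders to adapt.
import Xo from 'xo-lib'

async function createPrivateNetwork() {
  const xo = new Xo({ url: 'https://xo.example.org' })
  await xo.open()
  await xo.signIn({ email: 'admin@example.org', password: 'secret' })

  // Parameter names match those declared by the plugin's load() hook.
  await xo.call('plugin.SDNController.createPrivateNetwork', {
    poolId: '<pool UUID>',
    networkName: 'My private network',
    networkDescription: 'Pool-wide private network',
    encapsulation: 'gre', // or 'vxlan' (UDP port 4789 must then be open)
  })

  await xo.close()
}

createPrivateNetwork().catch(console.error)
```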

### Configuration

Like all other xo-server plugins, it can be configured directly via
the web interface, see [the plugin documentation](https://xen-orchestra.com/docs/plugins.html).

The plugin's configuration contains two settings (a minimal sketch follows this list):

- `cert-dir`: a path where to find the certificates used to create SSL connections with the hosts.
  If none is provided, the plugin will create its own self-signed certificates.
- `override-certs`: whether or not to uninstall an already existing SDN controller CA certificate in order to replace it with the plugin's one.
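
For reference, here is a minimal sketch of what this configuration could look like; the key names come from the plugin's `configurationSchema` (shown later in this diff), while the directory path is a placeholder:

```js
// Hypothetical configuration values — both keys are optional.
const sdnControllerConfiguration = {
  // directory containing client-cert.pem, client-key.pem and ca-cert.pem
  'cert-dir': '/var/lib/xo-server/sdn-controller',
  // uninstall an already installed SDN controller CA certificate and
  // replace it with the plugin's one
  'override-certs': false,
}

export default sdnControllerConfiguration
```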

## Contributions

Contributions are *very* welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

AGPL3 © [Vates SAS](http://vates.fr)
35
packages/xo-server-sdn-controller/package.json
Normal file
@@ -0,0 +1,35 @@
{
  "name": "xo-server-sdn-controller",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-sdn-controller",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "packages/xo-server-sdn-controller",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "main": "./dist",
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
  },
  "version": "0.0.0",
  "engines": {
    "node": ">=6"
  },
  "devDependencies": {
    "@babel/cli": "^7.4.4",
    "@babel/core": "^7.4.4",
    "@babel/preset-env": "^7.4.4",
    "cross-env": "^5.2.0"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.1.4",
    "lodash": "^4.17.11",
    "node-openssl-cert": "^0.0.81",
    "promise-toolbox": "^0.13.0"
  },
  "private": true
}
830
packages/xo-server-sdn-controller/src/index.js
Normal file
@@ -0,0 +1,830 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import NodeOpenssl from 'node-openssl-cert'
|
||||
import { access, constants, readFile, writeFile } from 'fs'
|
||||
import { EventEmitter } from 'events'
|
||||
import { filter, find, forOwn, map } from 'lodash'
|
||||
import { fromCallback, fromEvent } from 'promise-toolbox'
|
||||
import { join } from 'path'
|
||||
|
||||
import { OvsdbClient } from './ovsdb-client'
|
||||
|
||||
// =============================================================================
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller')
|
||||
|
||||
const PROTOCOL = 'pssl'
|
||||
|
||||
const CA_CERT = 'ca-cert.pem'
|
||||
const CLIENT_KEY = 'client-key.pem'
|
||||
const CLIENT_CERT = 'client-cert.pem'
|
||||
|
||||
const SDN_CONTROLLER_CERT = 'sdn-controller-ca.pem'
|
||||
|
||||
const NB_DAYS = 9999
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
'cert-dir': {
|
||||
description: `Full path to a directory where to find: \`client-cert.pem\`,
|
||||
\`client-key.pem\` and \`ca-cert.pem\` to create ssl connections with hosts.
|
||||
If none is provided, the plugin will create its own self-signed certificates.`,
|
||||
|
||||
type: 'string',
|
||||
},
|
||||
'override-certs': {
|
||||
description: `Replace already existing SDN controller CA certificate`,
|
||||
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
async function fileWrite(path, data) {
|
||||
await fromCallback(writeFile, path, data)
|
||||
log.debug(`${path} successfully written`)
|
||||
}
|
||||
|
||||
async function fileRead(path) {
|
||||
const result = await fromCallback(readFile, path)
|
||||
return result
|
||||
}
|
||||
|
||||
async function fileExists(path) {
|
||||
try {
|
||||
await fromCallback(access, path, constants.F_OK)
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT') {
|
||||
return false
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
|
||||
class SDNController extends EventEmitter {
|
||||
constructor({ xo, getDataDir }) {
|
||||
super()
|
||||
|
||||
this._xo = xo
|
||||
|
||||
this._getDataDir = getDataDir
|
||||
|
||||
this._clientKey = null
|
||||
this._clientCert = null
|
||||
this._caCert = null
|
||||
|
||||
this._poolNetworks = []
|
||||
this._ovsdbClients = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks = new Map()
|
||||
this._starCenters = new Map()
|
||||
|
||||
this._cleaners = []
|
||||
this._objectsAdded = this._objectsAdded.bind(this)
|
||||
this._objectsUpdated = this._objectsUpdated.bind(this)
|
||||
|
||||
this._overrideCerts = false
|
||||
|
||||
this._unsetApiMethod = null
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async configure(configuration) {
|
||||
this._overrideCerts = configuration['override-certs']
|
||||
let certDirectory = configuration['cert-dir']
|
||||
|
||||
if (certDirectory == null) {
|
||||
log.debug(`No cert-dir provided, using default self-signed certificates`)
|
||||
certDirectory = await this._getDataDir()
|
||||
|
||||
if (!(await fileExists(join(certDirectory, CA_CERT)))) {
|
||||
// If one certificate doesn't exist, none should
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_KEY))),
|
||||
`${CLIENT_KEY} should not exist`
|
||||
)
|
||||
assert(
|
||||
!(await fileExists(join(certDirectory, CLIENT_CERT))),
|
||||
`${CLIENT_CERT} should not exist`
|
||||
)
|
||||
|
||||
log.debug(`No default self-signed certificates exists, creating them`)
|
||||
await this._generateCertificatesAndKey(certDirectory)
|
||||
}
|
||||
}
|
||||
// TODO: verify certificates and create new certificates if needed
|
||||
|
||||
;[this._clientKey, this._clientCert, this._caCert] = await Promise.all([
|
||||
fileRead(join(certDirectory, CLIENT_KEY)),
|
||||
fileRead(join(certDirectory, CLIENT_CERT)),
|
||||
fileRead(join(certDirectory, CA_CERT)),
|
||||
])
|
||||
|
||||
this._ovsdbClients.forEach(client => {
|
||||
client.updateCertificates(this._clientKey, this._clientCert, this._caCert)
|
||||
})
|
||||
const updatedPools = []
|
||||
for (let i = 0; i < this._poolNetworks.length; ++i) {
|
||||
const poolNetwork = this._poolNetworks[i]
|
||||
if (updatedPools.includes(poolNetwork.pool)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const xapi = this._xo.getXapi(poolNetwork.pool)
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
updatedPools.push(poolNetwork.pool)
|
||||
}
|
||||
}
|
||||
|
||||
async load() {
|
||||
const createPrivateNetwork = this._createPrivateNetwork.bind(this)
|
||||
createPrivateNetwork.description =
|
||||
'Creates a pool-wide private network on a selected pool'
|
||||
createPrivateNetwork.params = {
|
||||
poolId: { type: 'string' },
|
||||
networkName: { type: 'string' },
|
||||
networkDescription: { type: 'string' },
|
||||
encapsulation: { type: 'string' },
|
||||
}
|
||||
createPrivateNetwork.resolve = {
|
||||
xoPool: ['poolId', 'pool', ''],
|
||||
}
|
||||
this._unsetApiMethod = this._xo.addApiMethod(
|
||||
'plugin.SDNController.createPrivateNetwork',
|
||||
createPrivateNetwork
|
||||
)
|
||||
|
||||
// FIXME: we should monitor when xapis are added/removed
|
||||
forOwn(this._xo.getAllXapis(), async xapi => {
|
||||
await xapi.objectsFetched
|
||||
|
||||
if (this._setControllerNeeded(xapi) === false) {
|
||||
this._cleaners.push(await this._manageXapi(xapi))
|
||||
|
||||
const hosts = filter(xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
this._createOvsdbClient(host)
|
||||
})
|
||||
)
|
||||
|
||||
// Add already existing pool-wide private networks
|
||||
const networks = filter(xapi.objects.all, { $type: 'network' })
|
||||
forOwn(networks, async network => {
|
||||
if (network.other_config.private_pool_wide === 'true') {
|
||||
log.debug(
|
||||
`Adding network: '${network.name_label}' for pool: '${
|
||||
network.$pool.name_label
|
||||
}' to managed networks`
|
||||
)
|
||||
const center = await this._electNewCenter(network, true)
|
||||
this._poolNetworks.push({
|
||||
pool: network.$pool.$ref,
|
||||
network: network.$ref,
|
||||
starCenter: center ? center.$ref : null,
|
||||
})
|
||||
this._networks.set(network.$id, network.$ref)
|
||||
if (center != null) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async unload() {
|
||||
this._ovsdbClients = []
|
||||
this._poolNetworks = []
|
||||
this._newHosts = []
|
||||
|
||||
this._networks.clear()
|
||||
this._starCenters.clear()
|
||||
|
||||
this._cleaners.forEach(cleaner => cleaner())
|
||||
this._cleaners = []
|
||||
|
||||
this._unsetApiMethod()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
async _createPrivateNetwork({
|
||||
xoPool,
|
||||
networkName,
|
||||
networkDescription,
|
||||
encapsulation,
|
||||
}) {
|
||||
const pool = this._xo.getXapiObject(xoPool)
|
||||
await this._setPoolControllerIfNeeded(pool)
|
||||
|
||||
// Create the private network
|
||||
const privateNetworkRef = await pool.$xapi.call('network.create', {
|
||||
name_label: networkName,
|
||||
name_description: networkDescription,
|
||||
MTU: 0,
|
||||
other_config: {
|
||||
automatic: 'false',
|
||||
private_pool_wide: 'true',
|
||||
encapsulation: encapsulation,
|
||||
},
|
||||
})
|
||||
|
||||
const privateNetwork = await pool.$xapi._getOrWaitObject(privateNetworkRef)
|
||||
|
||||
log.info(
|
||||
`Private network '${
|
||||
privateNetwork.name_label
|
||||
}' has been created for pool '${pool.name_label}'`
|
||||
)
|
||||
|
||||
// For each pool's host, create a tunnel to the private network
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
await this._createTunnel(host, privateNetwork)
|
||||
this._createOvsdbClient(host)
|
||||
})
|
||||
)
|
||||
|
||||
const center = await this._electNewCenter(privateNetwork, false)
|
||||
this._poolNetworks.push({
|
||||
pool: pool.$ref,
|
||||
network: privateNetwork.$ref,
|
||||
starCenter: center ? center.$ref : null,
|
||||
encapsulation: encapsulation,
|
||||
})
|
||||
this._networks.set(privateNetwork.$id, privateNetwork.$ref)
|
||||
if (center != null) {
|
||||
this._starCenters.set(center.$id, center.$ref)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _manageXapi(xapi) {
|
||||
const { objects } = xapi
|
||||
|
||||
const objectsRemovedXapi = this._objectsRemoved.bind(this, xapi)
|
||||
objects.on('add', this._objectsAdded)
|
||||
objects.on('update', this._objectsUpdated)
|
||||
objects.on('remove', objectsRemovedXapi)
|
||||
|
||||
await this._installCaCertificateIfNeeded(xapi)
|
||||
|
||||
return () => {
|
||||
objects.removeListener('add', this._objectsAdded)
|
||||
objects.removeListener('update', this._objectsUpdated)
|
||||
objects.removeListener('remove', objectsRemovedXapi)
|
||||
}
|
||||
}
|
||||
|
||||
async _objectsAdded(objects) {
|
||||
await Promise.all(
|
||||
map(objects, async object => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'host') {
|
||||
log.debug(
|
||||
`New host: '${object.name_label}' in pool: '${
|
||||
object.$pool.name_label
|
||||
}'`
|
||||
)
|
||||
|
||||
if (find(this._newHosts, { $ref: object.$ref }) == null) {
|
||||
this._newHosts.push(object)
|
||||
}
|
||||
this._createOvsdbClient(object)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _objectsUpdated(objects) {
|
||||
await Promise.all(
|
||||
map(objects, async (object, id) => {
|
||||
const { $type } = object
|
||||
|
||||
if ($type === 'PIF') {
|
||||
await this._pifUpdated(object)
|
||||
} else if ($type === 'host') {
|
||||
await this._hostUpdated(object)
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _objectsRemoved(xapi, objects) {
|
||||
await Promise.all(
|
||||
map(objects, async (object, id) => {
|
||||
const client = find(this._ovsdbClients, { id: id })
|
||||
if (client != null) {
|
||||
this._ovsdbClients.splice(this._ovsdbClients.indexOf(client), 1)
|
||||
}
|
||||
|
||||
// If a Star center host is removed: re-elect a new center where needed
|
||||
const starCenterRef = this._starCenters.get(id)
|
||||
if (starCenterRef != null) {
|
||||
this._starCenters.delete(id)
|
||||
const poolNetworks = filter(this._poolNetworks, {
|
||||
starCenter: starCenterRef,
|
||||
})
|
||||
for (let i = 0; i < poolNetworks.length; ++i) {
|
||||
const poolNetwork = poolNetworks[i]
|
||||
const network = await xapi._getOrWaitObject(poolNetwork.network)
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If a network is removed, clean this._poolNetworks from it
|
||||
const networkRef = this._networks.get(id)
|
||||
if (networkRef != null) {
|
||||
this._networks.delete(id)
|
||||
const poolNetwork = find(this._poolNetworks, {
|
||||
network: networkRef,
|
||||
})
|
||||
if (poolNetwork != null) {
|
||||
this._poolNetworks.splice(
|
||||
this._poolNetworks.indexOf(poolNetwork),
|
||||
1
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async _pifUpdated(pif) {
|
||||
// Only if PIF is in a private network
|
||||
const poolNetwork = find(this._poolNetworks, { network: pif.network })
|
||||
if (poolNetwork == null) {
|
||||
return
|
||||
}
|
||||
|
||||
if (!pif.currently_attached) {
|
||||
if (poolNetwork.starCenter !== pif.host) {
|
||||
return
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`PIF: '${pif.device}' of network: '${
|
||||
pif.$network.name_label
|
||||
}' star-center host: '${
|
||||
pif.$host.name_label
|
||||
}' has been unplugged, electing a new host`
|
||||
)
|
||||
const newCenter = await this._electNewCenter(pif.$network, true)
|
||||
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
|
||||
this._starCenters.delete(pif.$host.$id)
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
} else {
|
||||
if (poolNetwork.starCenter == null) {
|
||||
const host = pif.$host
|
||||
log.debug(
|
||||
`First available host: '${
|
||||
host.name_label
|
||||
}' becomes star center of network: '${pif.$network.name_label}'`
|
||||
)
|
||||
poolNetwork.starCenter = pif.host
|
||||
this._starCenters.set(host.$id, host.$ref)
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`PIF: '${pif.device}' of network: '${pif.$network.name_label}' host: '${
|
||||
pif.$host.name_label
|
||||
}' has been plugged`
|
||||
)
|
||||
|
||||
const starCenter = await pif.$xapi._getOrWaitObject(
|
||||
poolNetwork.starCenter
|
||||
)
|
||||
await this._addHostToNetwork(pif.$host, pif.$network, starCenter)
|
||||
}
|
||||
}
|
||||
|
||||
async _hostUpdated(host) {
|
||||
const xapi = host.$xapi
|
||||
|
||||
if (host.enabled) {
|
||||
if (host.PIFs.length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
|
||||
const newHost = find(this._newHosts, { $ref: host.$ref })
|
||||
if (newHost != null) {
|
||||
this._newHosts.splice(this._newHosts.indexOf(newHost), 1)
|
||||
try {
|
||||
await xapi.call('pool.certificate_sync')
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Couldn't sync SDN controller ca certificate in pool: '${
|
||||
host.$pool.name_label
|
||||
}' because: ${error}`
|
||||
)
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < tunnels.length; ++i) {
|
||||
const tunnel = tunnels[i]
|
||||
const accessPIF = await xapi._getOrWaitObject(tunnel.access_PIF)
|
||||
if (accessPIF.host !== host.$ref) {
|
||||
continue
|
||||
}
|
||||
|
||||
const poolNetwork = find(this._poolNetworks, {
|
||||
network: accessPIF.network,
|
||||
})
|
||||
if (poolNetwork == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
if (accessPIF.currently_attached) {
|
||||
continue
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`Pluging PIF: '${accessPIF.device}' for host: '${
|
||||
host.name_label
|
||||
}' on network: '${accessPIF.$network.name_label}'`
|
||||
)
|
||||
try {
|
||||
await xapi.call('PIF.plug', accessPIF.$ref)
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`XAPI error while pluging PIF: '${accessPIF.device}' on host: '${
|
||||
host.name_label
|
||||
}' for network: '${accessPIF.$network.name_label}'`
|
||||
)
|
||||
}
|
||||
|
||||
const starCenter = await host.$xapi._getOrWaitObject(
|
||||
poolNetwork.starCenter
|
||||
)
|
||||
await this._addHostToNetwork(host, accessPIF.$network, starCenter)
|
||||
}
|
||||
} else {
|
||||
const poolNetworks = filter(this._poolNetworks, { starCenter: host.$ref })
|
||||
for (let i = 0; i < poolNetworks.length; ++i) {
|
||||
const poolNetwork = poolNetworks[i]
|
||||
const network = await host.$xapi._getOrWaitObject(poolNetwork.network)
|
||||
log.debug(
|
||||
`Star center host: '${host.name_label}' of network: '${
|
||||
network.name_label
|
||||
}' in pool: '${
|
||||
host.$pool.name_label
|
||||
}' is no longer reachable, electing a new host`
|
||||
)
|
||||
|
||||
const newCenter = await this._electNewCenter(network, true)
|
||||
poolNetwork.starCenter = newCenter ? newCenter.$ref : null
|
||||
this._starCenters.delete(host.$id)
|
||||
if (newCenter != null) {
|
||||
this._starCenters.set(newCenter.$id, newCenter.$ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _setPoolControllerIfNeeded(pool) {
|
||||
if (!this._setControllerNeeded(pool.$xapi)) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const controller = find(pool.$xapi.objects.all, { $type: 'SDN_controller' })
|
||||
if (controller != null) {
|
||||
await pool.$xapi.call('SDN_controller.forget', controller.$ref)
|
||||
log.debug(`Remove old SDN controller from pool: '${pool.name_label}'`)
|
||||
}
|
||||
|
||||
await pool.$xapi.call('SDN_controller.introduce', PROTOCOL)
|
||||
log.debug(`Set SDN controller of pool: '${pool.name_label}'`)
|
||||
this._cleaners.push(await this._manageXapi(pool.$xapi))
|
||||
}
|
||||
|
||||
_setControllerNeeded(xapi) {
|
||||
const controller = find(xapi.objects.all, { $type: 'SDN_controller' })
|
||||
return !(
|
||||
controller != null &&
|
||||
controller.protocol === PROTOCOL &&
|
||||
controller.address === '' &&
|
||||
controller.port === 0
|
||||
)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _installCaCertificateIfNeeded(xapi) {
|
||||
let needInstall = false
|
||||
try {
|
||||
const result = await xapi.call('pool.certificate_list')
|
||||
if (!result.includes(SDN_CONTROLLER_CERT)) {
|
||||
needInstall = true
|
||||
} else if (this._overrideCerts) {
|
||||
await xapi.call('pool.certificate_uninstall', SDN_CONTROLLER_CERT)
|
||||
log.debug(
|
||||
`Old SDN Controller CA certificate uninstalled on pool: '${
|
||||
xapi.pool.name_label
|
||||
}'`
|
||||
)
|
||||
needInstall = true
|
||||
}
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Couldn't retrieve certificate list of pool: '${xapi.pool.name_label}'`
|
||||
)
|
||||
}
|
||||
if (!needInstall) {
|
||||
return
|
||||
}
|
||||
|
||||
try {
|
||||
await xapi.call(
|
||||
'pool.certificate_install',
|
||||
SDN_CONTROLLER_CERT,
|
||||
this._caCert.toString()
|
||||
)
|
||||
await xapi.call('pool.certificate_sync')
|
||||
log.debug(
|
||||
`SDN controller CA certificate install in pool: '${
|
||||
xapi.pool.name_label
|
||||
}'`
|
||||
)
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Couldn't install SDN controller CA certificate in pool: '${
|
||||
xapi.pool.name_label
|
||||
}' because: ${error}`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _electNewCenter(network, resetNeeded) {
|
||||
const pool = network.$pool
|
||||
|
||||
let newCenter = null
|
||||
const hosts = filter(pool.$xapi.objects.all, { $type: 'host' })
|
||||
await Promise.all(
|
||||
map(hosts, async host => {
|
||||
if (resetNeeded) {
|
||||
// Clean old ports and interfaces
|
||||
const hostClient = find(this._ovsdbClients, { host: host.$ref })
|
||||
if (hostClient != null) {
|
||||
try {
|
||||
await hostClient.resetForNetwork(network.uuid, network.name_label)
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Couldn't reset network: '${network.name_label}' for host: '${
|
||||
host.name_label
|
||||
}' in pool: '${network.$pool.name_label}' because: ${error}`
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (newCenter != null) {
|
||||
return
|
||||
}
|
||||
|
||||
const pif = find(host.$PIFs, { network: network.$ref })
|
||||
if (pif != null && pif.currently_attached && host.enabled) {
|
||||
newCenter = host
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
if (newCenter == null) {
|
||||
log.error(
|
||||
`Unable to elect a new star-center host to network: '${
|
||||
network.name_label
|
||||
}' for pool: '${
|
||||
network.$pool.name_label
|
||||
}' because there's no available host`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// Recreate star topology
|
||||
await Promise.all(
|
||||
await map(hosts, async host => {
|
||||
await this._addHostToNetwork(host, network, newCenter)
|
||||
})
|
||||
)
|
||||
|
||||
log.info(
|
||||
`New star center host elected: '${newCenter.name_label}' in network: '${
|
||||
network.name_label
|
||||
}'`
|
||||
)
|
||||
|
||||
return newCenter
|
||||
}
|
||||
|
||||
async _createTunnel(host, network) {
|
||||
const pif = find(host.$PIFs, { physical: true })
|
||||
if (pif == null) {
|
||||
log.error(
|
||||
`No PIF found to create tunnel on host: '${
|
||||
host.name_label
|
||||
}' for network: '${network.name_label}'`
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
await host.$xapi.call('tunnel.create', pif.$ref, network.$ref)
|
||||
log.debug(
|
||||
`Tunnel added on host '${host.name_label}' for network '${
|
||||
network.name_label
|
||||
}'`
|
||||
)
|
||||
}
|
||||
|
||||
async _addHostToNetwork(host, network, starCenter) {
|
||||
if (host.$ref === starCenter.$ref) {
|
||||
// Nothing to do
|
||||
return
|
||||
}
|
||||
|
||||
const hostClient = find(this._ovsdbClients, {
|
||||
host: host.$ref,
|
||||
})
|
||||
if (hostClient == null) {
|
||||
log.error(`No OVSDB client found for host: '${host.name_label}'`)
|
||||
return
|
||||
}
|
||||
|
||||
const starCenterClient = find(this._ovsdbClients, {
|
||||
host: starCenter.$ref,
|
||||
})
|
||||
if (starCenterClient == null) {
|
||||
log.error(
|
||||
`No OVSDB client found for star-center host: '${starCenter.name_label}'`
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
const encapsulation =
|
||||
network.other_config.encapsulation != null
|
||||
? network.other_config.encapsulation
|
||||
: 'gre'
|
||||
|
||||
try {
|
||||
await hostClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
starCenterClient.address,
|
||||
encapsulation
|
||||
)
|
||||
await starCenterClient.addInterfaceAndPort(
|
||||
network.uuid,
|
||||
network.name_label,
|
||||
hostClient.address,
|
||||
encapsulation
|
||||
)
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`Couldn't add host: '${host.name_label}' to network: '${
|
||||
network.name_label
|
||||
}' in pool: '${host.$pool.name_label}' because: ${error}`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
_createOvsdbClient(host) {
|
||||
const foundClient = find(this._ovsdbClients, { host: host.$ref })
|
||||
if (foundClient != null) {
|
||||
return foundClient
|
||||
}
|
||||
|
||||
const client = new OvsdbClient(
|
||||
host,
|
||||
this._clientKey,
|
||||
this._clientCert,
|
||||
this._caCert
|
||||
)
|
||||
this._ovsdbClients.push(client)
|
||||
return client
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _generateCertificatesAndKey(dataDir) {
|
||||
const openssl = new NodeOpenssl()
|
||||
|
||||
const rsakeyoptions = {
|
||||
rsa_keygen_bits: 4096,
|
||||
format: 'PKCS8',
|
||||
}
|
||||
const subject = {
|
||||
countryName: 'XX',
|
||||
localityName: 'Default City',
|
||||
organizationName: 'Default Company LTD',
|
||||
}
|
||||
const csroptions = {
|
||||
hash: 'sha256',
|
||||
startdate: new Date('1984-02-04 00:00:00'),
|
||||
enddate: new Date('2143-06-04 04:16:23'),
|
||||
subject: subject,
|
||||
}
|
||||
const cacsroptions = {
|
||||
hash: 'sha256',
|
||||
days: NB_DAYS,
|
||||
subject: subject,
|
||||
}
|
||||
|
||||
openssl.generateRSAPrivateKey(rsakeyoptions, (err, cakey, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while generating CA private key: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
openssl.generateCSR(cacsroptions, cakey, null, (err, csr, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while generating CA certificate: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
openssl.selfSignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
cakey,
|
||||
null,
|
||||
async (err, cacrt, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while signing CA certificate: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CA_CERT), cacrt)
|
||||
openssl.generateRSAPrivateKey(
|
||||
rsakeyoptions,
|
||||
async (err, key, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while generating private key: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_KEY), key)
|
||||
openssl.generateCSR(csroptions, key, null, (err, csr, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while generating certificate: ${err}`)
|
||||
return
|
||||
}
|
||||
openssl.CASignCSR(
|
||||
csr,
|
||||
cacsroptions,
|
||||
false,
|
||||
cacrt,
|
||||
cakey,
|
||||
null,
|
||||
async (err, crt, cmd) => {
|
||||
if (err) {
|
||||
log.error(`Error while signing certificate: ${err}`)
|
||||
return
|
||||
}
|
||||
|
||||
await fileWrite(join(dataDir, CLIENT_CERT), crt)
|
||||
this.emit('certWritten')
|
||||
}
|
||||
)
|
||||
})
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
await fromEvent(this, 'certWritten', {})
|
||||
log.debug('All certificates have been successfully written')
|
||||
}
|
||||
}
|
||||
|
||||
export default opts => new SDNController(opts)
|
||||
packages/xo-server-sdn-controller/src/ovsdb-client.js (new file, 511 lines added)
@@ -0,0 +1,511 @@
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import fromEvent from 'promise-toolbox/fromEvent'
|
||||
import { connect } from 'tls'
|
||||
|
||||
const log = createLogger('xo:xo-server:sdn-controller:ovsdb-client')
|
||||
|
||||
const OVSDB_PORT = 6640
|
||||
|
||||
// =============================================================================
|
||||
|
||||
export class OvsdbClient {
|
||||
constructor(host, clientKey, clientCert, caCert) {
|
||||
this._host = host
|
||||
this._numberOfPortAndInterface = 0
|
||||
this._requestID = 0
|
||||
|
||||
this.updateCertificates(clientKey, clientCert, caCert)
|
||||
|
||||
log.debug(`[${this._host.name_label}] New OVSDB client`)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
get address() {
|
||||
return this._host.address
|
||||
}
|
||||
|
||||
get host() {
|
||||
return this._host.$ref
|
||||
}
|
||||
|
||||
get id() {
|
||||
return this._host.$id
|
||||
}
|
||||
|
||||
updateCertificates(clientKey, clientCert, caCert) {
|
||||
this._clientKey = clientKey
|
||||
this._clientCert = clientCert
|
||||
this._caCert = caCert
|
||||
|
||||
log.debug(`[${this._host.name_label}] Certificates have been updated`)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async addInterfaceAndPort(
|
||||
networkUuid,
|
||||
networkName,
|
||||
remoteAddress,
|
||||
encapsulation
|
||||
) {
|
||||
const socket = await this._connect()
|
||||
const index = this._numberOfPortAndInterface
|
||||
++this._numberOfPortAndInterface
|
||||
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const alreadyExist = await this._interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (alreadyExist) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const interfaceName = 'tunnel_iface' + index
|
||||
const portName = 'tunnel_port' + index
|
||||
|
||||
// Add interface and port to the bridge
|
||||
const options = ['map', [['remote_ip', remoteAddress]]]
|
||||
const addInterfaceOperation = {
|
||||
op: 'insert',
|
||||
table: 'Interface',
|
||||
row: {
|
||||
type: encapsulation,
|
||||
options: options,
|
||||
name: interfaceName,
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_iface',
|
||||
}
|
||||
const addPortOperation = {
|
||||
op: 'insert',
|
||||
table: 'Port',
|
||||
row: {
|
||||
name: portName,
|
||||
interfaces: ['set', [['named-uuid', 'new_iface']]],
|
||||
other_config: ['map', [['private_pool_wide', 'true']]],
|
||||
},
|
||||
'uuid-name': 'new_port',
|
||||
}
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'insert', ['set', [['named-uuid', 'new_port']]]]],
|
||||
}
|
||||
const params = [
|
||||
'Open_vSwitch',
|
||||
addInterfaceOperation,
|
||||
addPortOperation,
|
||||
mutateBridgeOperation,
|
||||
]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
let error
|
||||
let details
|
||||
let i = 0
|
||||
let opResult
|
||||
do {
|
||||
opResult = jsonObjects[0].result[i]
|
||||
if (opResult != null && opResult.error != null) {
|
||||
error = opResult.error
|
||||
details = opResult.details
|
||||
}
|
||||
++i
|
||||
} while (opResult && !error)
|
||||
|
||||
if (error != null) {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Error while adding port: '${portName}' and interface: '${interfaceName}' to bridge: '${bridgeName}' on network: '${networkName}' because: ${error}: ${details}`
|
||||
)
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Port: '${portName}' and interface: '${interfaceName}' added to bridge: '${bridgeName}' on network: '${networkName}'`
|
||||
)
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
async resetForNetwork(networkUuid, networkName) {
|
||||
const socket = await this._connect()
|
||||
const [bridgeUuid, bridgeName] = await this._getBridgeUuidForNetwork(
|
||||
networkUuid,
|
||||
networkName,
|
||||
socket
|
||||
)
|
||||
if (bridgeUuid == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
// Delete old ports created by a SDN controller
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
const portsToDelete = []
|
||||
for (let i = 0; i < ports.length; ++i) {
|
||||
const portUuid = ports[i][1]
|
||||
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'other_config'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
forOwn(selectResult.other_config[1], config => {
|
||||
if (config[0] === 'private_pool_wide' && config[1] === 'true') {
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Adding port: '${
|
||||
selectResult.name
|
||||
}' to delete list from bridge: '${bridgeName}'`
|
||||
)
|
||||
portsToDelete.push(['uuid', portUuid])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (portsToDelete.length === 0) {
|
||||
// Nothing to do
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
const mutateBridgeOperation = {
|
||||
op: 'mutate',
|
||||
table: 'Bridge',
|
||||
where: [['_uuid', '==', ['uuid', bridgeUuid]]],
|
||||
mutations: [['ports', 'delete', ['set', portsToDelete]]],
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', mutateBridgeOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
if (jsonObjects[0].error != null) {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Couldn't delete ports from bridge: '${bridgeName}' because: ${
|
||||
jsonObjects[0].error
|
||||
}`
|
||||
)
|
||||
socket.destroy()
|
||||
return
|
||||
}
|
||||
|
||||
log.debug(
|
||||
`[${this._host.name_label}] Deleted ${
|
||||
jsonObjects[0].result[0].count
|
||||
} ports from bridge: '${bridgeName}'`
|
||||
)
|
||||
socket.destroy()
|
||||
}
|
||||
|
||||
// ===========================================================================
|
||||
|
||||
_parseJson(chunk) {
|
||||
let data = chunk.toString()
|
||||
let buffer = ''
|
||||
let depth = 0
|
||||
let pos = 0
|
||||
const objects = []
|
||||
|
||||
for (let i = pos; i < data.length; ++i) {
|
||||
const c = data.charAt(i)
|
||||
if (c === '{') {
|
||||
depth++
|
||||
} else if (c === '}') {
|
||||
depth--
|
||||
if (depth === 0) {
|
||||
const object = JSON.parse(buffer + data.substr(0, i + 1))
|
||||
objects.push(object)
|
||||
buffer = ''
|
||||
data = data.substr(i + 1)
|
||||
pos = 0
|
||||
i = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buffer += data
|
||||
return objects
|
||||
}
|
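// Standalone sketch of the depth-counting technique used by _parseJson above:
// several OVSDB replies can arrive concatenated in one TCP chunk, so objects are
// split by tracking '{'/'}' nesting rather than JSON.parse-ing the whole chunk.
// Like the original, this sketch does not handle braces inside string values.
function splitJsonObjects(chunk) {
  const data = chunk.toString()
  const objects = []
  let depth = 0
  let start = 0
  for (let i = 0; i < data.length; ++i) {
    const c = data.charAt(i)
    if (c === '{') {
      ++depth
    } else if (c === '}' && --depth === 0) {
      objects.push(JSON.parse(data.slice(start, i + 1)))
      start = i + 1
    }
  }
  return objects
}
// splitJsonObjects('{"id":0,"result":[]}{"id":1,"result":[]}')
// -> [ { id: 0, result: [] }, { id: 1, result: [] } ]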
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _getBridgeUuidForNetwork(networkUuid, networkName, socket) {
|
||||
const where = [
|
||||
[
|
||||
'external_ids',
|
||||
'includes',
|
||||
['map', [['xs-network-uuids', networkUuid]]],
|
||||
],
|
||||
]
|
||||
const selectResult = await this._select(
|
||||
'Bridge',
|
||||
['_uuid', 'name'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return [null, null]
|
||||
}
|
||||
|
||||
const bridgeUuid = selectResult._uuid[1]
|
||||
const bridgeName = selectResult.name
|
||||
log.debug(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Found bridge: '${bridgeName}' for network: '${networkName}'`
|
||||
)
|
||||
|
||||
return [bridgeUuid, bridgeName]
|
||||
}
|
||||
|
||||
async _interfaceAndPortAlreadyExist(
|
||||
bridgeUuid,
|
||||
bridgeName,
|
||||
remoteAddress,
|
||||
socket
|
||||
) {
|
||||
const ports = await this._getBridgePorts(bridgeUuid, bridgeName, socket)
|
||||
if (ports == null) {
|
||||
return
|
||||
}
|
||||
|
||||
for (let i = 0; i < ports.length; ++i) {
|
||||
const portUuid = ports[i][1]
|
||||
const interfaces = await this._getPortInterfaces(portUuid, socket)
|
||||
if (interfaces == null) {
|
||||
continue
|
||||
}
|
||||
|
||||
let j
|
||||
for (j = 0; j < interfaces.length; ++j) {
|
||||
const interfaceUuid = interfaces[j][1]
|
||||
const hasRemote = await this._interfaceHasRemote(
|
||||
interfaceUuid,
|
||||
remoteAddress,
|
||||
socket
|
||||
)
|
||||
if (hasRemote === true) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
async _getBridgePorts(bridgeUuid, bridgeName, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', bridgeUuid]]]
|
||||
const selectResult = await this._select('Bridge', ['ports'], where, socket)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.ports[0] === 'set'
|
||||
? selectResult.ports[1]
|
||||
: [selectResult.ports]
|
||||
}
|
||||
|
||||
async _getPortInterfaces(portUuid, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', portUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Port',
|
||||
['name', 'interfaces'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return null
|
||||
}
|
||||
|
||||
return selectResult.interfaces[0] === 'set'
|
||||
? selectResult.interfaces[1]
|
||||
: [selectResult.interfaces]
|
||||
}
|
||||
|
||||
async _interfaceHasRemote(interfaceUuid, remoteAddress, socket) {
|
||||
const where = [['_uuid', '==', ['uuid', interfaceUuid]]]
|
||||
const selectResult = await this._select(
|
||||
'Interface',
|
||||
['name', 'options'],
|
||||
where,
|
||||
socket
|
||||
)
|
||||
if (selectResult == null) {
|
||||
return false
|
||||
}
|
||||
|
||||
for (let i = 0; i < selectResult.options[1].length; ++i) {
|
||||
const option = selectResult.options[1][i]
|
||||
if (option[0] === 'remote_ip' && option[1] === remoteAddress) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _select(table, columns, where, socket) {
|
||||
const selectOperation = {
|
||||
op: 'select',
|
||||
table: table,
|
||||
columns: columns,
|
||||
where: where,
|
||||
}
|
||||
|
||||
const params = ['Open_vSwitch', selectOperation]
|
||||
const jsonObjects = await this._sendOvsdbTransaction(params, socket)
|
||||
if (jsonObjects == null) {
|
||||
return
|
||||
}
|
||||
const jsonResult = jsonObjects[0].result[0]
|
||||
if (jsonResult.error != null) {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Couldn't retrieve: '${columns}' in: '${table}' because: ${
|
||||
jsonResult.error
|
||||
}: ${jsonResult.details}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
if (jsonResult.rows.length === 0) {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] No '${columns}' found in: '${table}' where: '${where}'`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// For now all select operations should return only 1 row
|
||||
assert(
|
||||
jsonResult.rows.length === 1,
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] There should be exactly 1 row when searching: '${columns}' in: '${table}' where: '${where}'`
|
||||
)
|
||||
|
||||
return jsonResult.rows[0]
|
||||
}
|
||||
|
||||
async _sendOvsdbTransaction(params, socket) {
|
||||
const stream = socket
|
||||
|
||||
const requestId = this._requestID
|
||||
++this._requestID
|
||||
const req = {
|
||||
id: requestId,
|
||||
method: 'transact',
|
||||
params: params,
|
||||
}
|
||||
|
||||
try {
|
||||
stream.write(JSON.stringify(req))
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`[${this._host.name_label}] Error while writing into stream: ${error}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
let result
|
||||
let jsonObjects
|
||||
let resultRequestId
|
||||
do {
|
||||
try {
|
||||
result = await fromEvent(stream, 'data', {})
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] Error while waiting for stream data: ${error}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
jsonObjects = this._parseJson(result)
|
||||
resultRequestId = jsonObjects[0].id
|
||||
} while (resultRequestId !== requestId)
|
||||
|
||||
return jsonObjects
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async _connect() {
|
||||
const options = {
|
||||
ca: this._caCert,
|
||||
key: this._clientKey,
|
||||
cert: this._clientCert,
|
||||
host: this._host.address,
|
||||
port: OVSDB_PORT,
|
||||
rejectUnauthorized: false,
|
||||
requestCert: false,
|
||||
}
|
||||
const socket = connect(options)
|
||||
|
||||
try {
|
||||
await fromEvent(socket, 'secureConnect', {})
|
||||
} catch (error) {
|
||||
log.error(
|
||||
`[${this._host.name_label}] TLS connection failed because: ${error}: ${
|
||||
error.code
|
||||
}`
|
||||
)
|
||||
throw error
|
||||
}
|
||||
|
||||
log.debug(`[${this._host.name_label}] TLS connection successful`)
|
||||
|
||||
socket.on('error', error => {
|
||||
log.error(
|
||||
`[${
|
||||
this._host.name_label
|
||||
}] OVSDB client socket error: ${error} with code: ${error.code}`
|
||||
)
|
||||
})
|
||||
|
||||
return socket
|
||||
}
|
||||
}
|
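// Minimal sketch of the wire format used by _sendOvsdbTransaction above — values
// are illustrative, only the shapes are taken from this file. Requests are plain
// JSON objects written to the TLS socket (port 6640) and replies are matched back
// to their request by `id`, with one `result` entry per operation in `params`.
const exampleRequest = {
  id: 0,
  method: 'transact',
  params: [
    'Open_vSwitch', // the database name always comes first
    {
      op: 'select',
      table: 'Bridge',
      columns: ['_uuid', 'name'],
      where: [
        ['external_ids', 'includes', ['map', [['xs-network-uuids', '<network-uuid>']]]],
      ],
    },
  ],
}
const exampleReply = {
  id: 0,
  result: [{ rows: [{ _uuid: ['uuid', '<bridge-uuid>'], name: 'xapi3' }] }],
  error: null,
}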
||||
@@ -32,7 +32,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"nodemailer": "^5.0.0",
|
||||
"nodemailer": "^6.1.0",
|
||||
"nodemailer-markdown": "^1.0.1",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
},
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"handlebars": "^4.0.6",
|
||||
"html-minifier": "^3.5.8",
|
||||
"html-minifier": "^4.0.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
@@ -59,5 +59,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,6 +49,17 @@ maxTokenValidity = '0.5 year'
|
||||
# Delay for which backups listing on a remote is cached
|
||||
listingDebounce = '1 min'
|
||||
|
||||
# Maximum duration to wait for the backup size before returning
|
||||
#
|
||||
# It should be short to avoid blocking the display of the available backups.
|
||||
vmBackupSizeTimeout = '2 seconds'
|
||||
|
||||
# Helmet handles HTTP security via headers
|
||||
#
|
||||
# https://helmetjs.github.io/docs/
|
||||
#[http.helmet.hsts]
|
||||
#includeSubDomains = false
|
||||
|
||||
[[http.listen]]
|
||||
port = 80
|
||||
|
||||
@@ -68,6 +79,7 @@ honorCipherOrder = true
|
||||
secureOptions = 117440512
|
||||
|
||||
[http.mounts]
|
||||
'/' = '../xo-web/dist'
|
||||
|
||||
[remoteOptions]
|
||||
mountsDir = '/run/xo-server/mounts'
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server",
|
||||
"version": "5.38.1",
|
||||
"version": "5.43.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Server part of Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -37,11 +38,11 @@
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/defined": "^0.0.0",
|
||||
"@xen-orchestra/emit-async": "^0.0.0",
|
||||
"@xen-orchestra/fs": "^0.8.0",
|
||||
"@xen-orchestra/fs": "^0.9.0",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"@xen-orchestra/mixin": "^0.0.0",
|
||||
"ajv": "^6.1.1",
|
||||
"app-conf": "^0.6.1",
|
||||
"app-conf": "^0.7.0",
|
||||
"archiver": "^3.0.0",
|
||||
"async-iterator-to-stream": "^1.0.1",
|
||||
"base64url": "^3.0.0",
|
||||
@@ -50,7 +51,7 @@
|
||||
"body-parser": "^1.18.2",
|
||||
"compression": "^1.7.3",
|
||||
"connect-flash": "^0.1.1",
|
||||
"cookie": "^0.3.1",
|
||||
"cookie": "^0.4.0",
|
||||
"cookie-parser": "^1.4.3",
|
||||
"d3-time-format": "^2.1.1",
|
||||
"debug": "^4.0.1",
|
||||
@@ -64,7 +65,7 @@
|
||||
"express-session": "^1.15.6",
|
||||
"fatfs": "^0.10.4",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"fs-extra": "^8.0.1",
|
||||
"get-stream": "^4.0.0",
|
||||
"golike-defer": "^0.4.1",
|
||||
"hashy": "^0.7.1",
|
||||
@@ -109,7 +110,7 @@
|
||||
"readable-stream": "^3.2.0",
|
||||
"redis": "^2.8.0",
|
||||
"schema-inspector": "^1.6.8",
|
||||
"semver": "^5.4.1",
|
||||
"semver": "^6.0.0",
|
||||
"serve-static": "^1.13.1",
|
||||
"split-lines": "^2.0.0",
|
||||
"stack-chain": "^2.0.0",
|
||||
@@ -117,18 +118,18 @@
|
||||
"struct-fu": "^1.2.0",
|
||||
"tar-stream": "^2.0.1",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.0.33",
|
||||
"tmp": "^0.1.0",
|
||||
"uuid": "^3.0.1",
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.6.0",
|
||||
"vhd-lib": "^0.7.0",
|
||||
"ws": "^6.0.0",
|
||||
"xen-api": "^0.25.0",
|
||||
"xen-api": "^0.25.2",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.4.1",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-remote-parser": "^0.5.0",
|
||||
"xo-vmdk-to-vhd": "^0.1.6",
|
||||
"xo-vmdk-to-vhd": "^0.1.7",
|
||||
"yazl": "^2.4.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -117,7 +117,7 @@ port = 80
|
||||
|
||||
# List of files/directories which will be served.
|
||||
[http.mounts]
|
||||
#'/' = '/path/to/xo-web/dist/'
|
||||
#'/any/url' = '/path/to/directory'
|
||||
|
||||
# List of proxied URLs (HTTP & WebSockets).
|
||||
[http.proxies]
|
||||
|
||||
packages/xo-server/src/_XenStore.js (new file, 5 lines added)
@@ -0,0 +1,5 @@
|
||||
import fromCallback from 'promise-toolbox/fromCallback'
|
||||
import { execFile } from 'child_process'
|
||||
|
||||
export const read = key =>
|
||||
fromCallback(cb => execFile('xenstore-read', [key], cb))
|
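// Hedged usage sketch — the key below is illustrative, not taken from this diff.
// `read` resolves with the stdout of `xenstore-read <key>` (promise-toolbox's
// fromCallback resolves with the callback's success value), so callers usually
// trim the trailing newline. Requires the xenstore CLI tools to be available.
//
//   import { read } from './_XenStore'
//   const domId = (await read('domid')).trim()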
||||
@@ -183,6 +183,7 @@ getLogs.params = {
|
||||
after: { type: ['number', 'string'], optional: true },
|
||||
before: { type: ['number', 'string'], optional: true },
|
||||
limit: { type: 'number', optional: true },
|
||||
'*': { type: 'any' },
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import pump from 'pump'
|
||||
import { format } from 'json-rpc-peer'
|
||||
import { format, JsonRpcError } from 'json-rpc-peer'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import { parseSize } from '../utils'
|
||||
@@ -128,7 +128,7 @@ async function handleImportContent(req, res, { xapi, id }) {
|
||||
res.end(format.response(0, true))
|
||||
} catch (e) {
|
||||
res.writeHead(500)
|
||||
res.end(format.error(0, new Error(e.message)))
|
||||
res.end(format.error(0, new JsonRpcError(e.message)))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,26 +1,22 @@
|
||||
import { format } from 'json-rpc-peer'
|
||||
import { format, JsonRpcError } from 'json-rpc-peer'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export async function set({
|
||||
host,
|
||||
multipathing,
|
||||
|
||||
// TODO: use camel case.
|
||||
multipathing,
|
||||
name_label: nameLabel,
|
||||
name_description: nameDescription,
|
||||
}) {
|
||||
const xapi = this.getXapi(host)
|
||||
const hostId = host._xapiId
|
||||
host = this.getXapiObject(host)
|
||||
|
||||
if (multipathing !== undefined) {
|
||||
await xapi.setHostMultipathing(hostId, multipathing)
|
||||
}
|
||||
|
||||
return xapi.setHostProperties(hostId, {
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
})
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && host.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && host.set_name_label(nameLabel),
|
||||
multipathing !== undefined &&
|
||||
host.$xapi.setHostMultipathing(host.$id, multipathing),
|
||||
])
|
||||
}
|
||||
|
||||
set.description = 'changes the properties of a host'
|
||||
@@ -215,6 +211,25 @@ emergencyShutdownHost.resolve = {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function isHostServerTimeConsistent({ host }) {
|
||||
try {
|
||||
await this.getXapi(host).assertConsistentHostServerTime(host._xapiRef)
|
||||
return true
|
||||
} catch (e) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
isHostServerTimeConsistent.params = {
|
||||
host: { type: 'string' },
|
||||
}
|
||||
|
||||
isHostServerTimeConsistent.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function stats({ host, granularity }) {
|
||||
return this.getXapiHostStats(host._xapiId, granularity)
|
||||
}
|
||||
@@ -248,7 +263,7 @@ async function handleInstallSupplementalPack(req, res, { hostId }) {
|
||||
res.end(format.response(0))
|
||||
} catch (e) {
|
||||
res.writeHead(500)
|
||||
res.end(format.error(0, new Error(e.message)))
|
||||
res.end(format.error(0, new JsonRpcError(e.message)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,3 +284,19 @@ installSupplementalPack.params = {
|
||||
installSupplementalPack.resolve = {
|
||||
host: ['host', 'host', 'admin'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function isHyperThreadingEnabled({ host }) {
|
||||
return this.getXapi(host).isHyperThreadingEnabled(host._xapiId)
|
||||
}
|
||||
|
||||
isHyperThreadingEnabled.description = 'get hyper-threading information'
|
||||
|
||||
isHyperThreadingEnabled.params = {
|
||||
id: { type: 'string' },
|
||||
}
|
||||
|
||||
isHyperThreadingEnabled.resolve = {
|
||||
host: ['id', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
@@ -85,18 +85,26 @@ createBonded.description =
|
||||
// ===================================================================
|
||||
|
||||
export async function set({
|
||||
network,
|
||||
|
||||
automatic,
|
||||
defaultIsLocked,
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
network,
|
||||
}) {
|
||||
await this.getXapi(network).setNetworkProperties(network._xapiId, {
|
||||
automatic,
|
||||
defaultIsLocked,
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
network = this.getXapiObject(network)
|
||||
|
||||
await Promise.all([
|
||||
automatic !== undefined &&
|
||||
network.update_other_config('automatic', automatic ? 'true' : null),
|
||||
defaultIsLocked !== undefined &&
|
||||
network.set_default_locking_mode(
|
||||
defaultIsLocked ? 'disabled' : 'unlocked'
|
||||
),
|
||||
nameDescription !== undefined &&
|
||||
network.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && network.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
async function delete_({ PBD }) {
|
||||
// TODO: check if PBD is attached before
|
||||
await this.getXapi(PBD).call('PBD.destroy', PBD._xapiRef)
|
||||
await this.getXapi(PBD).callAsync('PBD.destroy', PBD._xapiRef)
|
||||
}
|
||||
export { delete_ as delete }
|
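// Hedged note on the recurring `call` -> `callAsync` change in these API files:
// in the xen-api client used here, callAsync invokes the `Async.` variant of the
// XAPI method and resolves once the resulting task completes, which suits
// long-running operations (PBD.plug, SR.scan, host.reboot, ...) better than
// holding a synchronous RPC open. Rough equivalence (sketch, not the library code):
//
//   await xapi.callAsync('PBD.plug', ref)
//   // ~ await waitForTask(await xapi.call('Async.PBD.plug', ref))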
||||
|
||||
@@ -37,7 +37,7 @@ disconnect.resolve = {
|
||||
|
||||
export async function connect({ PBD }) {
|
||||
// TODO: check if PBD is attached before
|
||||
await this.getXapi(PBD).call('PBD.plug', PBD._xapiRef)
|
||||
await this.getXapi(PBD).callAsync('PBD.plug', PBD._xapiRef)
|
||||
}
|
||||
|
||||
connect.params = {
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
// TODO: too low level, move into host.
|
||||
|
||||
import { filter, find } from 'lodash'
|
||||
|
||||
import { IPV4_CONFIG_MODES, IPV6_CONFIG_MODES } from '../xapi'
|
||||
|
||||
export function getIpv4ConfigurationModes() {
|
||||
@@ -15,7 +17,17 @@ export function getIpv6ConfigurationModes() {
|
||||
|
||||
async function delete_({ pif }) {
|
||||
// TODO: check if PIF is attached before
|
||||
await this.getXapi(pif).call('PIF.destroy', pif._xapiRef)
|
||||
const xapi = this.getXapi(pif)
|
||||
|
||||
const tunnels = filter(xapi.objects.all, { $type: 'tunnel' })
|
||||
const tunnel = find(tunnels, { access_PIF: pif._xapiRef })
|
||||
if (tunnel != null) {
|
||||
await xapi.callAsync('PIF.unplug', pif._xapiRef)
|
||||
await xapi.callAsync('tunnel.destroy', tunnel.$ref)
|
||||
return
|
||||
}
|
||||
|
||||
await xapi.callAsync('PIF.destroy', pif._xapiRef)
|
||||
}
|
||||
export { delete_ as delete }
|
||||
|
||||
@@ -32,7 +44,7 @@ delete_.resolve = {
|
||||
|
||||
export async function disconnect({ pif }) {
|
||||
// TODO: check if PIF is attached before
|
||||
await this.getXapi(pif).call('PIF.unplug', pif._xapiRef)
|
||||
await this.getXapi(pif).callAsync('PIF.unplug', pif._xapiRef)
|
||||
}
|
||||
|
||||
disconnect.params = {
|
||||
@@ -47,7 +59,7 @@ disconnect.resolve = {
|
||||
|
||||
export async function connect({ pif }) {
|
||||
// TODO: check if PIF is attached before
|
||||
await this.getXapi(pif).call('PIF.plug', pif._xapiRef)
|
||||
await this.getXapi(pif).callAsync('PIF.plug', pif._xapiRef)
|
||||
}
|
||||
|
||||
connect.params = {
|
||||
|
||||
@@ -1,18 +1,19 @@
|
||||
import { format } from 'json-rpc-peer'
|
||||
import { format, JsonRpcError } from 'json-rpc-peer'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export async function set({
|
||||
pool,
|
||||
|
||||
// TODO: use camel case.
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
}) {
|
||||
await this.getXapi(pool).setPoolProperties({
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
pool = this.getXapiObject(pool)
|
||||
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && pool.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && pool.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
@@ -234,7 +235,7 @@ async function handleInstallSupplementalPack(req, res, { poolId }) {
|
||||
res.end(format.response(0))
|
||||
} catch (e) {
|
||||
res.writeHead(500)
|
||||
res.end(format.error(0, new Error(e.message)))
|
||||
res.end(format.error(0, new JsonRpcError(e.message)))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,14 +10,15 @@ import { forEach, parseXml } from '../utils'
|
||||
export async function set({
|
||||
sr,
|
||||
|
||||
// TODO: use camel case.
|
||||
name_description: nameDescription,
|
||||
name_label: nameLabel,
|
||||
}) {
|
||||
await this.getXapi(sr).setSrProperties(sr._xapiId, {
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
})
|
||||
sr = this.getXapiObject(sr)
|
||||
|
||||
await Promise.all([
|
||||
nameDescription !== undefined && sr.set_name_description(nameDescription),
|
||||
nameLabel !== undefined && sr.set_name_label(nameLabel),
|
||||
])
|
||||
}
|
||||
|
||||
set.params = {
|
||||
@@ -35,7 +36,7 @@ set.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function scan({ SR }) {
|
||||
await this.getXapi(SR).call('SR.scan', SR._xapiRef)
|
||||
await this.getXapi(SR).callAsync('SR.scan', SR._xapiRef)
|
||||
}
|
||||
|
||||
scan.params = {
|
||||
@@ -179,6 +180,35 @@ createIso.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function createFile({
|
||||
host,
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
location,
|
||||
}) {
|
||||
const xapi = this.getXapi(host)
|
||||
return xapi.createSr({
|
||||
hostRef: host._xapiRef,
|
||||
name_label: nameLabel,
|
||||
name_description: nameDescription,
|
||||
type: 'file',
|
||||
device_config: { location },
|
||||
})
|
||||
}
|
||||
|
||||
createFile.params = {
|
||||
host: { type: 'string' },
|
||||
nameLabel: { type: 'string' },
|
||||
nameDescription: { type: 'string' },
|
||||
location: { type: 'string' },
|
||||
}
|
||||
|
||||
createFile.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// NFS SR
|
||||
|
||||
@@ -361,6 +391,58 @@ createExt.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect all ZFS pools
|
||||
// Return a dict of pools with their parameters { <poolname>: {<paramdict>}}
|
||||
// example output (the parameter mountpoint is of interest):
|
||||
// {"tank":
|
||||
// {
|
||||
// "setuid": "on", "relatime": "off", "referenced": "24K", "written": "24K", "zoned": "off", "primarycache": "all",
|
||||
// "logbias": "latency", "creation": "Mon May 27 17:24 2019", "sync": "standard", "snapdev": "hidden",
|
||||
// "dedup": "off", "sharenfs": "off", "usedbyrefreservation": "0B", "sharesmb": "off", "createtxg": "1",
|
||||
// "canmount": "on", "mountpoint": "/tank", "casesensitivity": "sensitive", "utf8only": "off", "xattr": "on",
|
||||
// "dnodesize": "legacy", "mlslabel": "none", "objsetid": "54", "defcontext": "none", "rootcontext": "none",
|
||||
// "mounted": "yes", "compression": "off", "overlay": "off", "logicalused": "47K", "usedbysnapshots": "0B",
|
||||
// "filesystem_count": "none", "copies": "1", "snapshot_limit": "none", "aclinherit": "restricted",
|
||||
// "compressratio": "1.00x", "readonly": "off", "version": "5", "normalization": "none", "filesystem_limit": "none",
|
||||
// "type": "filesystem", "secondarycache": "all", "refreservation": "none", "available": "17.4G", "used": "129K",
|
||||
// "exec": "on", "refquota": "none", "refcompressratio": "1.00x", "quota": "none", "keylocation": "none",
|
||||
// "snapshot_count": "none", "fscontext": "none", "vscan": "off", "reservation": "none", "atime": "on",
|
||||
// "recordsize": "128K", "usedbychildren": "105K", "usedbydataset": "24K", "guid": "656061077639704004",
|
||||
// "pbkdf2iters": "0", "checksum": "on", "special_small_blocks": "0", "redundant_metadata": "all",
|
||||
// "volmode": "default", "devices": "on", "keyformat": "none", "logicalreferenced": "12K", "acltype": "off",
|
||||
// "nbmand": "off", "context": "none", "encryption": "off", "snapdir": "hidden"}}
|
||||
export async function probeZfs({ host }) {
|
||||
const xapi = this.getXapi(host)
|
||||
try {
|
||||
const result = await xapi.call(
|
||||
'host.call_plugin',
|
||||
host._xapiRef,
|
||||
'zfs.py',
|
||||
'list_zfs_pools',
|
||||
{}
|
||||
)
|
||||
return JSON.parse(result)
|
||||
} catch (error) {
|
||||
if (
|
||||
error.code === 'XENAPI_MISSING_PLUGIN' ||
|
||||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
|
||||
) {
|
||||
return {}
|
||||
} else {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
probeZfs.params = {
|
||||
host: { type: 'string' },
|
||||
}
|
||||
|
||||
probeZfs.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
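// Hedged usage sketch: assuming this export is exposed with the usual xo-server
// API naming (file name as namespace, export name as method), a client could do:
//
//   const pools = await xo.call('sr.probeZfs', { host: hostId })
//   // -> {} if the zfs.py plugin is missing on the host,
//   //    otherwise e.g. { tank: { mountpoint: '/tank', ... } }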
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect all NFS shares (exports) on a NFS server
|
||||
// Return a table of exports with their paths and ACLs
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
export async function add({ tag, object }) {
|
||||
await this.getXapi(object).addTag(object._xapiId, tag)
|
||||
await this.getXapiObject(object).add_tags(tag)
|
||||
}
|
||||
|
||||
add.description = 'add a new tag to an object'
|
||||
@@ -16,7 +16,7 @@ add.params = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function remove({ tag, object }) {
|
||||
await this.getXapi(object).removeTag(object._xapiId, tag)
|
||||
await this.getXapiObject(object).remove_tags(tag)
|
||||
}
|
||||
|
||||
remove.description = 'remove an existing tag from an object'
|
||||
|
||||
@@ -34,3 +34,25 @@ delete_.permission = 'admin'
|
||||
delete_.params = {
|
||||
token: { type: 'string' },
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function deleteAll({ except }) {
|
||||
await this.deleteAuthenticationTokens({
|
||||
filter: {
|
||||
user_id: this.session.get('user_id'),
|
||||
id: {
|
||||
__not: except,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
deleteAll.description =
|
||||
'delete all tokens of the current user except the current one'
|
||||
|
||||
deleteAll.permission = ''
|
||||
|
||||
deleteAll.params = {
|
||||
except: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
@@ -48,8 +48,7 @@ connect.resolve = {
|
||||
|
||||
export async function set({ position, vbd }) {
|
||||
if (position !== undefined) {
|
||||
const xapi = this.getXapi(vbd)
|
||||
await xapi.call('VBD.set_userdevice', vbd._xapiRef, String(position))
|
||||
await this.getXapiObject(vbd).set_userdevice(String(position))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,9 +66,7 @@ set.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function setBootable({ vbd, bootable }) {
|
||||
const xapi = this.getXapi(vbd)
|
||||
|
||||
await xapi.call('VBD.set_bootable', vbd._xapiRef, bootable)
|
||||
await this.getXapiObject(vbd).set_bootable(bootable)
|
||||
}
|
||||
|
||||
setBootable.params = {
|
||||
|
||||
@@ -64,6 +64,7 @@ export async function set({
|
||||
allowedIpv4Addresses,
|
||||
allowedIpv6Addresses,
|
||||
attached,
|
||||
rateLimit,
|
||||
}) {
|
||||
const oldIpAddresses = vif.allowedIpv4Addresses.concat(
|
||||
vif.allowedIpv6Addresses
|
||||
@@ -91,6 +92,9 @@ export async function set({
|
||||
mac,
|
||||
currently_attached: attached,
|
||||
ipv4_allowed: newIpAddresses,
|
||||
qos_algorithm_type: rateLimit != null ? 'ratelimit' : undefined,
|
||||
qos_algorithm_params:
|
||||
rateLimit != null ? { kbps: String(rateLimit) } : undefined,
|
||||
})
|
||||
|
||||
await this.allocIpAddresses(newVif.$id, newIpAddresses)
|
||||
@@ -107,6 +111,7 @@ export async function set({
|
||||
return this.getXapi(vif).editVif(vif._xapiId, {
|
||||
ipv4Allowed: allowedIpv4Addresses,
|
||||
ipv6Allowed: allowedIpv6Addresses,
|
||||
rateLimit,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -129,6 +134,11 @@ set.params = {
|
||||
optional: true,
|
||||
},
|
||||
attached: { type: 'boolean', optional: true },
|
||||
rateLimit: {
|
||||
description: 'in kilobytes per second',
|
||||
optional: true,
|
||||
type: ['number', 'null'],
|
||||
},
|
||||
}
|
||||
|
||||
set.resolve = {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import defer from 'golike-defer'
|
||||
import { format } from 'json-rpc-peer'
|
||||
import { format, JsonRpcError } from 'json-rpc-peer'
|
||||
import { ignoreErrors } from 'promise-toolbox'
|
||||
import { assignWith, concat } from 'lodash'
|
||||
import {
|
||||
@@ -193,6 +193,11 @@ create.params = {
|
||||
optional: true,
|
||||
},
|
||||
|
||||
networkConfig: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
},
|
||||
|
||||
coreOs: {
|
||||
type: 'boolean',
|
||||
optional: true,
|
||||
@@ -315,6 +320,11 @@ create.params = {
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
hvmBootFirmware: { type: 'string', optional: true },
|
||||
|
||||
// other params are passed to `editVm`
|
||||
'*': { type: 'any' },
|
||||
}
|
||||
|
||||
create.resolve = {
|
||||
@@ -555,6 +565,8 @@ set.params = {
|
||||
// Identifier of the VM to update.
|
||||
id: { type: 'string' },
|
||||
|
||||
auto_poweron: { type: 'boolean', optional: true },
|
||||
|
||||
name_label: { type: 'string', optional: true },
|
||||
|
||||
name_description: { type: 'string', optional: true },
|
||||
@@ -598,7 +610,7 @@ set.params = {
|
||||
// Switch from Cirrus video adaptor to VGA adaptor
|
||||
vga: { type: 'string', optional: true },
|
||||
|
||||
videoram: { type: ['string', 'number'], optional: true },
|
||||
videoram: { type: 'number', optional: true },
|
||||
|
||||
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
|
||||
|
||||
@@ -616,6 +628,9 @@ set.params = {
|
||||
|
||||
// set the VM network interface controller
|
||||
nicType: { type: ['string', 'null'], optional: true },
|
||||
|
||||
// set the VM boot firmware mode
|
||||
hvmBootFirmware: { type: ['string', 'null'], optional: true },
|
||||
}
|
||||
|
||||
set.resolve = {
|
||||
@@ -625,13 +640,7 @@ set.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function restart({ vm, force = false }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
if (force) {
|
||||
await xapi.call('VM.hard_reboot', vm._xapiRef)
|
||||
} else {
|
||||
await xapi.call('VM.clean_reboot', vm._xapiRef)
|
||||
}
|
||||
return this.getXapi(vm).rebootVm(vm._xapiId, { hard: force })
|
||||
}
|
||||
|
||||
restart.params = {
|
||||
@@ -732,7 +741,7 @@ export async function convertToTemplate({ vm }) {
|
||||
// Convert to a template requires pool admin permission.
|
||||
await this.checkPermissions(this.user.id, [[vm.$pool, 'administrate']])
|
||||
|
||||
await this.getXapi(vm).call('VM.set_is_a_template', vm._xapiRef, true)
|
||||
await this.getXapiObject(vm).set_is_a_template(true)
|
||||
}
|
||||
|
||||
convertToTemplate.params = {
|
||||
@@ -1084,7 +1093,7 @@ stop.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function suspend({ vm }) {
|
||||
await this.getXapi(vm).call('VM.suspend', vm._xapiRef)
|
||||
await this.getXapi(vm).callAsync('VM.suspend', vm._xapiRef)
|
||||
}
|
||||
|
||||
suspend.params = {
|
||||
@@ -1098,7 +1107,7 @@ suspend.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function pause({ vm }) {
|
||||
await this.getXapi(vm).call('VM.pause', vm._xapiRef)
|
||||
await this.getXapi(vm).callAsync('VM.pause', vm._xapiRef)
|
||||
}
|
||||
|
||||
pause.params = {
|
||||
@@ -1198,7 +1207,7 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
|
||||
res.end(format.response(0, vm.$id))
|
||||
} catch (e) {
|
||||
res.writeHead(500)
|
||||
res.end(format.error(0, new Error(e.message)))
|
||||
res.end(format.error(0, new JsonRpcError(e.message)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1361,9 +1370,7 @@ createInterface.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function attachPci({ vm, pciId }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
await xapi.call('VM.add_to_other_config', vm._xapiRef, 'pci', pciId)
|
||||
await this.getXapiObject(vm).update_other_config('pci', pciId)
|
||||
}
|
||||
|
||||
attachPci.params = {
|
||||
@@ -1378,9 +1385,7 @@ attachPci.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function detachPci({ vm }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
await xapi.call('VM.remove_from_other_config', vm._xapiRef, 'pci')
|
||||
await this.getXapiObject(vm).update_other_config('pci', null)
|
||||
}
|
||||
|
||||
detachPci.params = {
|
||||
@@ -1413,15 +1418,11 @@ stats.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export async function setBootOrder({ vm, order }) {
|
||||
const xapi = this.getXapi(vm)
|
||||
|
||||
order = { order }
|
||||
if (vm.virtualizationMode === 'hvm') {
|
||||
await xapi.call('VM.set_HVM_boot_params', vm._xapiRef, order)
|
||||
return
|
||||
if (vm.virtualizationMode !== 'hvm') {
|
||||
throw invalidParameters('You can only set the boot order on a HVM guest')
|
||||
}
|
||||
|
||||
throw invalidParameters('You can only set the boot order on a HVM guest')
|
||||
await this.getXapiObject(vm).update_HVM_boot_params('order', order)
|
||||
}
|
||||
|
||||
setBootOrder.params = {
|
||||
|
||||
@@ -55,6 +55,7 @@ getAllObjects.description = 'Returns all XO objects'
|
||||
getAllObjects.params = {
|
||||
filter: { type: 'object', optional: true },
|
||||
limit: { type: 'number', optional: true },
|
||||
ndjson: { type: 'boolean', optional: true },
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
@@ -269,10 +269,10 @@ export async function fixHostNotInNetwork({ xosanSr, host }) {
|
||||
if (pif) {
|
||||
const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
|
||||
reconfigurePifIP(xapi, pif, newIP)
|
||||
await xapi.call('PIF.plug', pif.$ref)
|
||||
await xapi.callAsync('PIF.plug', pif.$ref)
|
||||
const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
|
||||
if (PBD) {
|
||||
await xapi.call('PBD.plug', PBD.$ref)
|
||||
await xapi.callAsync('PBD.plug', PBD.$ref)
|
||||
}
|
||||
const sshKey = await getOrCreateSshKey(xapi)
|
||||
await callPlugin(xapi, host, 'receive_ssh_keys', {
|
||||
@@ -809,7 +809,7 @@ export const createSR = defer(async function(
|
||||
})
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 6 }
|
||||
log.debug('scanning new SR')
|
||||
await xapi.call('SR.scan', xosanSrRef)
|
||||
await xapi.callAsync('SR.scan', xosanSrRef)
|
||||
await this.rebindLicense({
|
||||
licenseId: license.id,
|
||||
oldBoundObjectId: tmpBoundObjectId,
|
||||
@@ -884,13 +884,13 @@ async function createVDIOnLVMWithoutSizeLimit(xapi, lvmSr, diskSize) {
|
||||
if (result.exit !== 0) {
|
||||
throw Error('Could not create volume ->' + result.stdout)
|
||||
}
|
||||
await xapi.call('SR.scan', xapi.getObject(lvmSr).$ref)
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(lvmSr).$ref)
|
||||
const vdi = find(xapi.getObject(lvmSr).$VDIs, vdi => vdi.uuid === uuid)
|
||||
if (vdi != null) {
|
||||
await xapi.setSrProperties(vdi.$ref, {
|
||||
nameLabel: 'xosan_data',
|
||||
nameDescription: 'Created by XO',
|
||||
})
|
||||
await Promise.all([
|
||||
vdi.set_name_description('Created by XO'),
|
||||
vdi.set_name_label('xosan_data'),
|
||||
])
|
||||
return vdi
|
||||
}
|
||||
}
|
||||
@@ -989,7 +989,7 @@ async function replaceBrickOnSameVM(
|
||||
await xapi.disconnectVbd(previousVBD)
|
||||
await xapi.deleteVdi(previousVBD.VDI)
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 4 }
|
||||
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
} finally {
|
||||
delete CURRENT_POOL_OPERATIONS[poolId]
|
||||
}
|
||||
@@ -1068,7 +1068,7 @@ export async function replaceBrick({
|
||||
await xapi.deleteVm(previousVMEntry.vm, true)
|
||||
}
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 3 }
|
||||
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
} finally {
|
||||
delete CURRENT_POOL_OPERATIONS[poolId]
|
||||
}
|
||||
@@ -1115,7 +1115,7 @@ async function _prepareGlusterVm(
|
||||
const firstVif = newVM.$VIFs[0]
|
||||
if (xosanNetwork.$id !== firstVif.$network.$id) {
|
||||
try {
|
||||
await xapi.call('VIF.move', firstVif.$ref, xosanNetwork.$ref)
|
||||
await xapi.callAsync('VIF.move', firstVif.$ref, xosanNetwork.$ref)
|
||||
} catch (error) {
|
||||
if (error.code === 'MESSAGE_METHOD_UNKNOWN') {
|
||||
// VIF.move has been introduced in xenserver 7.0
|
||||
@@ -1124,7 +1124,7 @@ async function _prepareGlusterVm(
|
||||
}
|
||||
}
|
||||
}
|
||||
await xapi.addTag(newVM.$id, 'XOSAN')
|
||||
await newVM.add_tags('XOSAN')
|
||||
await xapi.editVm(newVM, {
|
||||
name_label: `XOSAN - ${lvmSr.name_label} - ${
|
||||
host.name_label
|
||||
@@ -1132,7 +1132,7 @@ async function _prepareGlusterVm(
|
||||
name_description: 'Xosan VM storage',
|
||||
memory: memorySize,
|
||||
})
|
||||
await xapi.call('VM.set_xenstore_data', newVM.$ref, xenstoreData)
|
||||
await newVM.set_xenstore_data(xenstoreData)
|
||||
const rootDisk = newVM.$VBDs
|
||||
.map(vbd => vbd && vbd.$VDI)
|
||||
.find(vdi => vdi && vdi.name_label === 'xosan_root')
|
||||
@@ -1330,7 +1330,7 @@ export const addBricks = defer(async function(
|
||||
data.nodes = data.nodes.concat(newNodes)
|
||||
await xapi.xo.setData(xosansr, 'xosan_config', data)
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 2 }
|
||||
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
|
||||
} finally {
|
||||
delete CURRENT_POOL_OPERATIONS[poolId]
|
||||
}
|
||||
@@ -1382,7 +1382,7 @@ export const removeBricks = defer(async function($defer, { xosansr, bricks }) {
|
||||
)
|
||||
remove(data.nodes, node => ips.includes(node.vm.ip))
|
||||
await xapi.xo.setData(xosansr.id, 'xosan_config', data)
|
||||
await xapi.call('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
|
||||
await xapi.callAsync('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
|
||||
await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
|
||||
} finally {
|
||||
delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
|
||||
@@ -1542,9 +1542,10 @@ export async function downloadAndInstallXosanPack({ id, version, pool }) {
|
||||
const res = await this.requestResource('xosan', id, version)
|
||||
|
||||
await xapi.installSupplementalPackOnAllHosts(res)
|
||||
await xapi._updateObjectMapProperty(xapi.pool, 'other_config', {
|
||||
xosan_pack_installation_time: String(Math.floor(Date.now() / 1e3)),
|
||||
})
|
||||
await xapi.pool.update_other_config(
|
||||
'xosan_pack_installation_time',
|
||||
String(Math.floor(Date.now() / 1e3))
|
||||
)
|
||||
}
|
||||
|
||||
downloadAndInstallXosanPack.description = 'Register a resource via cloud plugin'
|
||||
|
||||
@@ -93,7 +93,7 @@ async function loadConfiguration() {
|
||||
function createExpressApp(config) {
|
||||
const app = createExpress()
|
||||
|
||||
app.use(helmet())
|
||||
app.use(helmet(config.http.helmet))
|
||||
|
||||
app.use(compression())
|
||||
|
||||
@@ -417,6 +417,7 @@ const setUpProxies = (express, opts, xo) => {
|
||||
}
|
||||
|
||||
const proxy = createProxyServer({
|
||||
changeOrigin: true,
|
||||
ignorePath: true,
|
||||
}).on('error', error => console.error(error))
|
||||
|
||||
|
||||
@@ -2,6 +2,8 @@ import Collection from '../collection/redis'
|
||||
import Model from '../model'
|
||||
import { forEach } from '../utils'
|
||||
|
||||
import { parseProp } from './utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export default class Remote extends Model {}
|
||||
@@ -14,12 +16,21 @@ export class Remotes extends Collection {
|
||||
async get(properties) {
|
||||
const remotes = await super.get(properties)
|
||||
forEach(remotes, remote => {
|
||||
remote.benchmarks =
|
||||
remote.benchmarks !== undefined
|
||||
? JSON.parse(remote.benchmarks)
|
||||
: undefined
|
||||
remote.benchmarks = parseProp('remote', remote, 'benchmarks')
|
||||
remote.enabled = remote.enabled === 'true'
|
||||
})
|
||||
return remotes
|
||||
}
|
||||
|
||||
_update(remotes) {
|
||||
return super._update(
|
||||
remotes.map(remote => {
|
||||
const { benchmarks } = remote
|
||||
if (benchmarks !== undefined) {
|
||||
remote.benchmarks = JSON.stringify(benchmarks)
|
||||
}
|
||||
return remote
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
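// Sketch of the round-trip implied above (field names from this diff, values
// illustrative). Redis stores fields as strings, so complex values are
// JSON-encoded in _update() and decoded on read (parseProp presumably wraps
// JSON.parse), while booleans like `enabled` are compared against 'true'.
const stored = { enabled: 'true', benchmarks: '[{"speed":120}]' }
const returned = {
  ...stored,
  enabled: stored.enabled === 'true',
  benchmarks:
    stored.benchmarks !== undefined ? JSON.parse(stored.benchmarks) : undefined,
}
// returned -> { enabled: true, benchmarks: [ { speed: 120 } ] }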
||||
|
||||
@@ -569,6 +569,16 @@ const TRANSFORMS = {
|
||||
MAC: obj.MAC,
|
||||
MTU: +obj.MTU,
|
||||
|
||||
// in kB/s
|
||||
rateLimit: (() => {
|
||||
if (obj.qos_algorithm_type === 'ratelimit') {
|
||||
const { kbps } = obj.qos_algorithm_params
|
||||
if (kbps !== undefined) {
|
||||
return +kbps
|
||||
}
|
||||
}
|
||||
})(),
|
||||
|
||||
$network: link(obj, 'network'),
|
||||
$VM: link(obj, 'VM'),
|
||||
}
|
||||
@@ -633,7 +643,7 @@ const TRANSFORMS = {
|
||||
description: poolPatch.name_description,
|
||||
name: poolPatch.name_label,
|
||||
pool_patch: poolPatch.$ref,
|
||||
size: poolPatch.size,
|
||||
size: +poolPatch.size,
|
||||
guidance: poolPatch.after_apply_guidance,
|
||||
time: toTimestamp(obj.timestamp_applied),
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
/* eslint eslint-comments/disable-enable-pair: [error, {allowWholeFile: true}] */
|
||||
/* eslint-disable camelcase */
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import concurrency from 'limit-concurrency-decorator'
|
||||
@@ -21,13 +22,14 @@ import { forbiddenOperation } from 'xo-common/api-errors'
|
||||
import { Xapi as XapiBase, NULL_REF } from 'xen-api'
|
||||
import {
|
||||
every,
|
||||
find,
|
||||
filter,
|
||||
find,
|
||||
flatMap,
|
||||
flatten,
|
||||
groupBy,
|
||||
includes,
|
||||
isEmpty,
|
||||
noop,
|
||||
omit,
|
||||
startsWith,
|
||||
uniq,
|
||||
@@ -227,14 +229,6 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
// =================================================================
|
||||
|
||||
_setObjectProperty(object, name, value) {
|
||||
return this.call(
|
||||
`${object.$type}.set_${camelToSnakeCase(name)}`,
|
||||
object.$ref,
|
||||
prepareXapiParam(value)
|
||||
)
|
||||
}
|
||||
|
||||
_setObjectProperties(object, props) {
|
||||
const { $ref: ref, $type: type } = object
|
||||
|
||||
@@ -253,101 +247,10 @@ export default class Xapi extends XapiBase {
|
||||
)::ignoreErrors()
|
||||
}
|
||||
|
||||
async _updateObjectMapProperty(object, prop, values) {
|
||||
const { $ref: ref, $type: type } = object
|
||||
|
||||
prop = camelToSnakeCase(prop)
|
||||
|
||||
const add = `${type}.add_to_${prop}`
|
||||
const remove = `${type}.remove_from_${prop}`
|
||||
|
||||
await Promise.all(
|
||||
mapToArray(values, (value, name) => {
|
||||
if (value !== undefined) {
|
||||
name = camelToSnakeCase(name)
|
||||
const removal = this.call(remove, ref, name)
|
||||
|
||||
return value === null
|
||||
? removal
|
||||
: removal
|
||||
::ignoreErrors()
|
||||
.then(() => this.call(add, ref, name, prepareXapiParam(value)))
|
||||
}
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
async setHostProperties(id, { nameLabel, nameDescription }) {
|
||||
await this._setObjectProperties(this.getObject(id), {
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
})
|
||||
}
|
||||
|
||||
async setPoolProperties({ autoPoweron, nameLabel, nameDescription }) {
|
||||
const { pool } = this
|
||||
|
||||
await Promise.all([
|
||||
this._setObjectProperties(pool, {
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
}),
|
||||
autoPoweron != null &&
|
||||
this._updateObjectMapProperty(pool, 'other_config', {
|
||||
autoPoweron: autoPoweron ? 'true' : null,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
async setSrProperties(id, { nameLabel, nameDescription }) {
|
||||
await this._setObjectProperties(this.getObject(id), {
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
})
|
||||
}
|
||||
|
||||
async setNetworkProperties(
|
||||
id,
|
||||
{ automatic, defaultIsLocked, nameDescription, nameLabel }
|
||||
) {
|
||||
let defaultLockingMode
|
||||
if (defaultIsLocked != null) {
|
||||
defaultLockingMode = defaultIsLocked ? 'disabled' : 'unlocked'
|
||||
}
|
||||
const network = this.getObject(id)
|
||||
await Promise.all([
|
||||
this._setObjectProperties(network, {
|
||||
defaultLockingMode,
|
||||
nameDescription,
|
||||
nameLabel,
|
||||
}),
|
||||
this._updateObjectMapProperty(network, 'other_config', {
|
||||
automatic:
|
||||
automatic === undefined ? undefined : automatic ? 'true' : null,
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
// =================================================================

async addTag(id, tag) {
const { $ref: ref, $type: type } = this.getObject(id)

await this.call(`${type}.add_tags`, ref, tag)
}

async removeTag(id, tag) {
const { $ref: ref, $type: type } = this.getObject(id)

await this.call(`${type}.remove_tags`, ref, tag)
}

// =================================================================

async setDefaultSr(srId) {
this._setObjectProperties(this.pool, {
default_SR: this.getObject(srId).$ref,
})
setDefaultSr(srId) {
return this.pool.set_default_SR(this.getObject(srId).$ref)
}

// =================================================================
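// Note: the pattern above, `this.pool.set_default_SR(ref)` instead of the
// generic `_setObjectProperties`, recurs throughout this diff: cached XAPI
// records now carry per-field setters (`set_name_label`, `set_bootable`, etc.)
// and map updaters (`update_other_config`, `update_blocked_operations`, etc.).
// Assuming they are generated by the xen-api client from the field name, a
// setter is roughly equivalent to the explicit call it replaces; hedged sketch:
const makeSetterSketch = (xapi, record, field) => value =>
  // e.g. makeSetterSketch(xapi, pool, 'default_SR')(srRef)
  xapi.call(`${record.$type}.set_${field}`, record.$ref, value)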
@@ -376,12 +279,12 @@ export default class Xapi extends XapiBase {
await pSettle(
mapToArray(vms, vm => {
if (!vm.is_control_domain) {
return this.call('VM.suspend', vm.$ref)
return this.callAsync('VM.suspend', vm.$ref)
}
})
)
await this.call('host.disable', host.$ref)
await this.call('host.shutdown', host.$ref)
await this.callAsync('host.shutdown', host.$ref)
}
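// The recurring change in these hunks is `this.call(...)` becoming
// `this.callAsync(...)` for long-running XAPI operations (suspend, shutdown,
// reboot, migrate, clone). `callAsync` comes from the xen-api client; the
// sketch below only illustrates the assumed semantics (run the `Async.`
// variant as a XAPI task and wait for it), and `waitTask` is an assumed helper:
async function callAsyncSketch(xapi, method, ...args) {
  // `Async.<method>` returns immediately with a task reference
  const taskRef = await xapi.call(`Async.${method}`, ...args)
  try {
    // resolve once the task reaches a final state, with the task result
    return await xapi.waitTask(taskRef)
  } finally {
    // best-effort cleanup of the finished task
    await xapi.call('task.destroy', taskRef).catch(() => {})
  }
}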
|
||||
|
||||
// =================================================================
|
||||
@@ -394,7 +297,7 @@ export default class Xapi extends XapiBase {
await this.call('host.disable', ref)

try {
await this.call('host.evacuate', ref)
await this.callAsync('host.evacuate', ref)
} catch (error) {
if (!force) {
await this.call('host.enable', ref)
@@ -409,7 +312,7 @@ export default class Xapi extends XapiBase {
}

async forgetHost(hostId) {
await this.call('host.destroy', this.getObject(hostId).$ref)
await this.callAsync('host.destroy', this.getObject(hostId).$ref)
}
|
||||
|
||||
async ejectHostFromPool(hostId) {
|
||||
@@ -444,9 +347,7 @@ export default class Xapi extends XapiBase {
|
||||
$defer(() => this.plugPbd(ref))
|
||||
})
|
||||
|
||||
return this._updateObjectMapProperty(
|
||||
host,
|
||||
'other_config',
|
||||
return host.update_other_config(
|
||||
multipathing
|
||||
? {
|
||||
multipathing: 'true',
|
||||
@@ -459,23 +360,23 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
async powerOnHost(hostId) {
|
||||
await this.call('host.power_on', this.getObject(hostId).$ref)
|
||||
await this.callAsync('host.power_on', this.getObject(hostId).$ref)
|
||||
}
|
||||
|
||||
async rebootHost(hostId, force = false) {
|
||||
const host = this.getObject(hostId)
|
||||
|
||||
await this._clearHost(host, force)
|
||||
await this.call('host.reboot', host.$ref)
|
||||
await this.callAsync('host.reboot', host.$ref)
|
||||
}
|
||||
|
||||
async restartHostAgent(hostId) {
|
||||
await this.call('host.restart_agent', this.getObject(hostId).$ref)
|
||||
await this.callAsync('host.restart_agent', this.getObject(hostId).$ref)
|
||||
}
|
||||
|
||||
async setRemoteSyslogHost(hostId, syslogDestination) {
|
||||
const host = this.getObject(hostId)
|
||||
await this.call('host.set_logging', host.$ref, {
|
||||
await host.set_logging({
|
||||
syslog_destination: syslogDestination,
|
||||
})
|
||||
await this.call('host.syslog_reconfigure', host.$ref)
|
||||
@@ -485,7 +386,7 @@ export default class Xapi extends XapiBase {
|
||||
const host = this.getObject(hostId)
|
||||
|
||||
await this._clearHost(host, force)
|
||||
await this.call('host.shutdown', host.$ref)
|
||||
await this.callAsync('host.shutdown', host.$ref)
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
@@ -499,7 +400,7 @@ export default class Xapi extends XapiBase {
}`
)

return this.call('VM.clone', vm.$ref, nameLabel)
return this.callAsync('VM.clone', vm.$ref, nameLabel).then(extractOpaqueRef)
}
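// `Async.VM.clone` resolves with the task result rather than the bare
// reference, hence the added `.then(extractOpaqueRef)`. The real helper is
// imported elsewhere in this file; this is only a hedged restatement of what
// it is assumed to do:
const OPAQUE_REF_RE = /OpaqueRef:[^\s<>]+/
function extractOpaqueRefSketch(taskResult) {
  const matches = OPAQUE_REF_RE.exec(String(taskResult))
  if (matches === null) {
    throw new Error(`no opaque ref found in: ${taskResult}`)
  }
  return matches[0]
}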
|
||||
|
||||
// Copy a VM: make a normal copy of a VM and all its VDIs.
|
||||
@@ -570,12 +471,7 @@ export default class Xapi extends XapiBase {
|
||||
stream = stream.pipe(sizeStream)
|
||||
|
||||
const onVmCreation =
|
||||
nameLabel !== undefined
|
||||
? vm =>
|
||||
targetXapi._setObjectProperties(vm, {
|
||||
nameLabel,
|
||||
})
|
||||
: null
|
||||
nameLabel !== undefined ? vm => vm.set_name_label(nameLabel) : null
|
||||
|
||||
const vm = await targetXapi._getOrWaitObject(
|
||||
await targetXapi._importVm(stream, sr, onVmCreation)
|
||||
@@ -715,17 +611,13 @@ export default class Xapi extends XapiBase {
|
||||
// It is necessary for suspended VMs to be shut down
|
||||
// to be able to delete their VDIs.
|
||||
if (vm.power_state !== 'Halted') {
|
||||
await this.call('VM.hard_shutdown', $ref)
|
||||
await this.callAsync('VM.hard_shutdown', $ref)
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
this.call('VM.set_is_a_template', vm.$ref, false),
|
||||
this._updateObjectMapProperty(vm, 'blocked_operations', {
|
||||
destroy: null,
|
||||
}),
|
||||
this._updateObjectMapProperty(vm, 'other_config', {
|
||||
default_template: null,
|
||||
}),
|
||||
vm.set_is_a_template(false),
|
||||
vm.update_blocked_operations('destroy', null),
|
||||
vm.update_other_config('default_template', null),
|
||||
])
|
||||
|
||||
// must be done before destroying the VM
|
||||
@@ -733,7 +625,7 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
// this cannot be done in parallel, otherwise disks and snapshots will be
|
||||
// destroyed even if this fails
|
||||
await this.call('VM.destroy', $ref)
|
||||
await this.callAsync('VM.destroy', $ref)
|
||||
|
||||
return Promise.all([
|
||||
asyncMap(vm.$snapshots, snapshot =>
|
||||
@@ -869,7 +761,13 @@ export default class Xapi extends XapiBase {
_assertHealthyVdiChains(vm) {
const cache = { __proto__: null }
forEach(vm.$VBDs, ({ $VDI }) => {
this._assertHealthyVdiChain($VDI, cache)
try {
this._assertHealthyVdiChain($VDI, cache)
} catch (error) {
error.VDI = $VDI
error.VM = vm
throw error
}
})
}
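// Attaching the VDI and VM to the thrown error lets callers report exactly
// which disk broke the chain instead of a generic failure. Hypothetical
// caller, assuming the error shape introduced above:
function reportUnhealthyChain(xapi, vm) {
  try {
    xapi._assertHealthyVdiChains(vm)
  } catch (error) {
    log.warn('unhealthy VDI chain', {
      vdi: error.VDI.name_label,
      vm: error.VM.name_label,
    })
    throw error
  }
}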
|
||||
|
||||
@@ -1058,23 +956,21 @@ export default class Xapi extends XapiBase {
|
||||
await this._createVmRecord({
|
||||
...delta.vm,
|
||||
affinity: null,
|
||||
blocked_operations: {
|
||||
...delta.vm.blocked_operations,
|
||||
start: 'Importing…',
|
||||
},
|
||||
ha_always_run: false,
|
||||
is_a_template: false,
|
||||
name_label: `[Importing…] ${name_label}`,
|
||||
other_config: {
|
||||
...delta.vm.other_config,
|
||||
[TAG_COPY_SRC]: delta.vm.uuid,
|
||||
},
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => this._deleteVm(vm))
|
||||
|
||||
await Promise.all([
|
||||
this._setObjectProperties(vm, {
|
||||
name_label: `[Importing…] ${name_label}`,
|
||||
}),
|
||||
this._updateObjectMapProperty(vm, 'blocked_operations', {
|
||||
start: 'Importing…',
|
||||
}),
|
||||
this._updateObjectMapProperty(vm, 'other_config', {
|
||||
[TAG_COPY_SRC]: delta.vm.uuid,
|
||||
}),
|
||||
])
|
||||
|
||||
// 2. Delete all VBDs which may have been created by the import.
|
||||
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
|
||||
|
||||
@@ -1096,9 +992,7 @@ export default class Xapi extends XapiBase {
|
||||
newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
|
||||
$defer.onFailure(() => this._deleteVdi(newVdi.$ref))
|
||||
|
||||
await this._updateObjectMapProperty(newVdi, 'other_config', {
|
||||
[TAG_COPY_SRC]: vdi.uuid,
|
||||
})
|
||||
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
|
||||
} else {
|
||||
newVdi = await this.createVdi({
|
||||
...vdi,
|
||||
@@ -1193,15 +1087,15 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
this._setObjectProperties(vm, {
|
||||
name_label,
|
||||
}),
|
||||
delta.vm.ha_always_run && vm.set_ha_always_run(true),
|
||||
vm.set_name_label(name_label),
|
||||
// FIXME: move
|
||||
this._updateObjectMapProperty(vm, 'blocked_operations', {
|
||||
start: disableStartAfterImport
|
||||
vm.update_blocked_operations(
|
||||
'start',
|
||||
disableStartAfterImport
|
||||
? 'Do not start this VM, clone it if you want to use it.'
|
||||
: null,
|
||||
}),
|
||||
: null
|
||||
),
|
||||
])
|
||||
|
||||
return { transferSize, vm }
|
||||
@@ -1255,7 +1149,7 @@ export default class Xapi extends XapiBase {
)

const loop = () =>
this.call(
this.callAsync(
'VM.migrate_send',
vm.$ref,
token,
@@ -1269,7 +1163,7 @@ export default class Xapi extends XapiBase {
pDelay(1e4).then(loop)
)

return loop()
return loop().then(noop)
}
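// The migration keeps its retry loop: on a matching error, wait 10 seconds
// (pDelay(1e4)) and call `loop` again; the final `.then(noop)` discards the
// task result so callers only observe completion. The condition being retried
// is outside this hunk; the TOO_MANY_STORAGE_MIGRATES code below is an
// assumption, mirroring the VDI migration later in this diff. Standalone shape
// of such a loop:
const retryWhile = (fn, { delayMs, shouldRetry }) => {
  const loop = () =>
    fn().catch(error => {
      if (!shouldRetry(error)) {
        throw error
      }
      return new Promise(resolve => setTimeout(resolve, delayMs)).then(loop)
    })
  return loop()
}
// e.g. retryWhile(
//   () => xapi.callAsync('VM.migrate_send', vm.$ref, token /* further migrate_send arguments */),
//   { delayMs: 1e4, shouldRetry: error => error != null && error.code === 'TOO_MANY_STORAGE_MIGRATES' }
// )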
|
||||
|
||||
@synchronized()
|
||||
@@ -1424,14 +1318,8 @@ export default class Xapi extends XapiBase {
|
||||
$defer.onFailure(() => this._deleteVm(vm))
|
||||
// Disable start and change the VM name label during import.
|
||||
await Promise.all([
|
||||
this.addForbiddenOperationToVm(
|
||||
vm.$id,
|
||||
'start',
|
||||
'OVA import in progress...'
|
||||
),
|
||||
this._setObjectProperties(vm, {
|
||||
name_label: `[Importing...] ${nameLabel}`,
|
||||
}),
|
||||
vm.update_blocked_operations('start', 'OVA import in progress...'),
|
||||
vm.set_name_label(`[Importing...] ${nameLabel}`),
|
||||
])
|
||||
|
||||
// 2. Create VDIs & Vifs.
|
||||
@@ -1448,7 +1336,7 @@ export default class Xapi extends XapiBase {
|
||||
$defer.onFailure(() => this._deleteVdi(vdi.$ref))
|
||||
|
||||
return this.createVbd({
|
||||
userdevice: disk.position,
|
||||
userdevice: String(disk.position),
|
||||
vdi,
|
||||
vm,
|
||||
})
|
||||
@@ -1491,8 +1379,8 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
// Enable start and restore the VM name label after import.
|
||||
await Promise.all([
|
||||
this.removeForbiddenOperationFromVm(vm.$id, 'start'),
|
||||
this._setObjectProperties(vm, { name_label: nameLabel }),
|
||||
vm.update_blocked_operations('start', null),
|
||||
vm.set_name_label(nameLabel),
|
||||
])
|
||||
return vm
|
||||
}
|
||||
@@ -1539,7 +1427,7 @@ export default class Xapi extends XapiBase {
|
||||
})
|
||||
} else {
|
||||
try {
|
||||
await this.call('VM.pool_migrate', vm.$ref, host.$ref, {
|
||||
await this.callAsync('VM.pool_migrate', vm.$ref, host.$ref, {
|
||||
force: 'true',
|
||||
})
|
||||
} catch (error) {
|
||||
@@ -1624,19 +1512,11 @@ export default class Xapi extends XapiBase {
|
||||
return /* await */ this._snapshotVm(this.getObject(vmId), nameLabel)
|
||||
}
|
||||
|
||||
async setVcpuWeight(vmId, weight) {
|
||||
weight = weight || null // Take all falsy values as a removal (0 included)
|
||||
const vm = this.getObject(vmId)
|
||||
await this._updateObjectMapProperty(vm, 'VCPUs_params', { weight })
|
||||
}
|
||||
|
||||
async _startVm(vm, host, force) {
|
||||
log.debug(`Starting VM ${vm.name_label}`)
|
||||
|
||||
if (force) {
|
||||
await this._updateObjectMapProperty(vm, 'blocked_operations', {
|
||||
start: null,
|
||||
})
|
||||
await vm.update_blocked_operations('start', null)
|
||||
}
|
||||
|
||||
return host === undefined
|
||||
@@ -1646,7 +1526,7 @@ export default class Xapi extends XapiBase {
|
||||
false, // Start paused?
|
||||
false // Skip pre-boot checks?
|
||||
)
|
||||
: this.call('VM.start_on', vm.$ref, host.$ref, false, false)
|
||||
: this.callAsync('VM.start_on', vm.$ref, host.$ref, false, false)
|
||||
}
|
||||
|
||||
async startVm(vmId, hostId, force) {
|
||||
@@ -1675,16 +1555,12 @@ export default class Xapi extends XapiBase {
|
||||
if (isVmHvm(vm)) {
|
||||
const { order } = vm.HVM_boot_params
|
||||
|
||||
await this._updateObjectMapProperty(vm, 'HVM_boot_params', {
|
||||
order: 'd',
|
||||
})
|
||||
await vm.update_HVM_boot_params('order', 'd')
|
||||
|
||||
try {
|
||||
await this._startVm(vm)
|
||||
} finally {
|
||||
await this._updateObjectMapProperty(vm, 'HVM_boot_params', {
|
||||
order,
|
||||
})
|
||||
await vm.update_HVM_boot_params('order', order)
|
||||
}
|
||||
} else {
|
||||
// Find the original template by name (*sigh*).
|
||||
@@ -1706,20 +1582,14 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
const cdDrive = this._getVmCdDrive(vm)
|
||||
forEach(vm.$VBDs, vbd => {
|
||||
promises.push(
|
||||
this._setObjectProperties(vbd, {
|
||||
bootable: vbd === cdDrive,
|
||||
})
|
||||
)
|
||||
promises.push(vbd.set_bootable(vbd === cdDrive))
|
||||
|
||||
bootables.push([vbd, Boolean(vbd.bootable)])
|
||||
})
|
||||
|
||||
promises.push(
|
||||
this._setObjectProperties(vm, {
|
||||
PV_bootloader: 'eliloader',
|
||||
}),
|
||||
this._updateObjectMapProperty(vm, 'other_config', {
|
||||
vm.set_PV_bootloader('eliloader'),
|
||||
vm.update_other_config({
|
||||
'install-distro':
|
||||
template && template.other_config['install-distro'],
|
||||
'install-repository': 'cdrom',
|
||||
@@ -1730,35 +1600,15 @@ export default class Xapi extends XapiBase {
|
||||
|
||||
await this._startVm(vm)
|
||||
} finally {
|
||||
this._setObjectProperties(vm, {
|
||||
PV_bootloader: bootloader,
|
||||
})::ignoreErrors()
|
||||
vm.set_PV_bootloader(bootloader)::ignoreErrors()
|
||||
|
||||
forEach(bootables, ([vbd, bootable]) => {
|
||||
this._setObjectProperties(vbd, { bootable })::ignoreErrors()
|
||||
vbd.set_bootable(bootable)::ignoreErrors()
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// vm_operations: http://xapi-project.github.io/xen-api/classes/vm.html
|
||||
async addForbiddenOperationToVm(vmId, operation, reason) {
|
||||
await this.call(
|
||||
'VM.add_to_blocked_operations',
|
||||
this.getObject(vmId).$ref,
|
||||
operation,
|
||||
`[XO] ${reason}`
|
||||
)
|
||||
}
|
||||
|
||||
async removeForbiddenOperationFromVm(vmId, operation) {
|
||||
await this.call(
|
||||
'VM.remove_from_blocked_operations',
|
||||
this.getObject(vmId).$ref,
|
||||
operation
|
||||
)
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
|
||||
async createVbd({
|
||||
@@ -1819,14 +1669,14 @@ export default class Xapi extends XapiBase {
|
||||
})
|
||||
|
||||
if (isVmRunning(vm)) {
|
||||
await this.call('VBD.plug', vbdRef)
|
||||
await this.callAsync('VBD.plug', vbdRef)
|
||||
}
|
||||
}
|
||||
|
||||
_cloneVdi(vdi) {
|
||||
log.debug(`Cloning VDI ${vdi.name_label}`)
|
||||
|
||||
return this.call('VDI.clone', vdi.$ref)
|
||||
return this.callAsync('VDI.clone', vdi.$ref).then(extractOpaqueRef)
|
||||
}
|
||||
|
||||
async createVdi({
|
||||
@@ -1849,7 +1699,7 @@ export default class Xapi extends XapiBase {
|
||||
log.debug(`Creating VDI ${name_label} on ${sr.name_label}`)
|
||||
|
||||
return this._getOrWaitObject(
|
||||
await this.call('VDI.create', {
|
||||
await this.callAsync('VDI.create', {
|
||||
name_description,
|
||||
name_label,
|
||||
other_config,
|
||||
@@ -1861,7 +1711,7 @@ export default class Xapi extends XapiBase {
|
||||
type,
|
||||
virtual_size: size !== undefined ? parseSize(size) : virtual_size,
|
||||
xenstore_data,
|
||||
})
|
||||
}).then(extractOpaqueRef)
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1879,9 +1729,12 @@ export default class Xapi extends XapiBase {
}`
)
try {
await pRetry(() => this.call('VDI.pool_migrate', vdi.$ref, sr.$ref, {}), {
when: { code: 'TOO_MANY_STORAGE_MIGRATES' },
})
await pRetry(
() => this.callAsync('VDI.pool_migrate', vdi.$ref, sr.$ref, {}),
{
when: { code: 'TOO_MANY_STORAGE_MIGRATES' },
}
)
} catch (error) {
const { code } = error
if (
@@ -1892,7 +1745,9 @@ export default class Xapi extends XapiBase {
throw error
}
const newVdi = await this.barrier(
await this.call('VDI.copy', vdi.$ref, sr.$ref)
await this.callAsync('VDI.copy', vdi.$ref, sr.$ref).then(
extractOpaqueRef
)
)
await asyncMap(vdi.$VBDs, async vbd => {
await this.call('VBD.destroy', vbd.$ref)
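// `pRetry` with `when: { code: 'TOO_MANY_STORAGE_MIGRATES' }` retries the
// migration only while the thrown error matches that pattern; anything else
// reaches the catch block, where unexpected codes are rethrown and the
// remaining cases fall back to an explicit VDI.copy. A rough model of that
// matching behaviour (the real implementation is promise-toolbox's retry):
async function retryWhenCode(fn, code, tries = 5) {
  for (let i = 0; ; ++i) {
    try {
      return await fn()
    } catch (error) {
      if (error == null || error.code !== code || i + 1 >= tries) {
        throw error
      }
    }
  }
}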
@@ -1910,7 +1765,7 @@ export default class Xapi extends XapiBase {
|
||||
log.debug(`Deleting VDI ${vdiRef}`)
|
||||
|
||||
try {
|
||||
await this.call('VDI.destroy', vdiRef)
|
||||
await this.callAsync('VDI.destroy', vdiRef)
|
||||
} catch (error) {
|
||||
if (error?.code !== 'HANDLE_INVALID') {
|
||||
throw error
|
||||
@@ -1923,7 +1778,7 @@ export default class Xapi extends XapiBase {
|
||||
`Resizing VDI ${vdi.name_label} from ${vdi.virtual_size} to ${size}`
|
||||
)
|
||||
|
||||
return this.call('VDI.resize', vdi.$ref, size)
|
||||
return this.callAsync('VDI.resize', vdi.$ref, size)
|
||||
}
|
||||
|
||||
_getVmCdDrive(vm) {
|
||||
@@ -1937,7 +1792,7 @@ export default class Xapi extends XapiBase {
|
||||
async _ejectCdFromVm(vm) {
|
||||
const cdDrive = this._getVmCdDrive(vm)
|
||||
if (cdDrive) {
|
||||
await this.call('VBD.eject', cdDrive.$ref)
|
||||
await this.callAsync('VBD.eject', cdDrive.$ref)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1945,20 +1800,20 @@ export default class Xapi extends XapiBase {
|
||||
const cdDrive = await this._getVmCdDrive(vm)
|
||||
if (cdDrive) {
|
||||
try {
|
||||
await this.call('VBD.insert', cdDrive.$ref, cd.$ref)
|
||||
await this.callAsync('VBD.insert', cdDrive.$ref, cd.$ref)
|
||||
} catch (error) {
|
||||
if (!force || error.code !== 'VBD_NOT_EMPTY') {
|
||||
throw error
|
||||
}
|
||||
|
||||
await this.call('VBD.eject', cdDrive.$ref)::ignoreErrors()
|
||||
await this.callAsync('VBD.eject', cdDrive.$ref)::ignoreErrors()
|
||||
|
||||
// Retry.
|
||||
await this.call('VBD.insert', cdDrive.$ref, cd.$ref)
|
||||
await this.callAsync('VBD.insert', cdDrive.$ref, cd.$ref)
|
||||
}
|
||||
|
||||
if (bootable !== Boolean(cdDrive.bootable)) {
|
||||
await this._setObjectProperties(cdDrive, { bootable })
|
||||
await cdDrive.set_bootable(bootable)
|
||||
}
|
||||
} else {
|
||||
await this.createVbd({
|
||||
@@ -1971,7 +1826,7 @@ export default class Xapi extends XapiBase {
|
||||
}
|
||||
|
||||
async connectVbd(vbdId) {
|
||||
await this.call('VBD.plug', vbdId)
|
||||
await this.callAsync('VBD.plug', vbdId)
|
||||
}
|
||||
|
||||
async _disconnectVbd(vbd) {
|
||||
@@ -1980,7 +1835,7 @@ export default class Xapi extends XapiBase {
|
||||
await this.call('VBD.unplug_force', vbd.$ref)
|
||||
} catch (error) {
|
||||
if (error.code === 'VBD_NOT_UNPLUGGABLE') {
|
||||
await this.call('VBD.set_unpluggable', vbd.$ref, true)
|
||||
await vbd.set_unpluggable(true)
|
||||
return this.call('VBD.unplug_force', vbd.$ref)
|
||||
}
|
||||
}
|
||||
@@ -2031,11 +1886,11 @@ export default class Xapi extends XapiBase {
|
||||
const vdi = this.getObject(vdiId)
|
||||
|
||||
const snap = await this._getOrWaitObject(
|
||||
await this.call('VDI.snapshot', vdi.$ref)
|
||||
await this.callAsync('VDI.snapshot', vdi.$ref).then(extractOpaqueRef)
|
||||
)
|
||||
|
||||
if (nameLabel) {
|
||||
await this.call('VDI.set_name_label', snap.$ref, nameLabel)
|
||||
await snap.set_name_label(nameLabel)
|
||||
}
|
||||
|
||||
return snap
|
||||
@@ -2159,7 +2014,7 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
|
||||
if (currently_attached && isVmRunning(vm)) {
|
||||
await this.call('VIF.plug', vifRef)
|
||||
await this.callAsync('VIF.plug', vifRef)
|
||||
}
|
||||
|
||||
return vifRef
|
||||
@@ -2187,7 +2042,7 @@ export default class Xapi extends XapiBase {
|
||||
// https://citrix.github.io/xenserver-sdk/#network
|
||||
other_config: { automatic: 'false' },
|
||||
})
|
||||
$defer.onFailure(() => this.call('network.destroy', networkRef))
|
||||
$defer.onFailure(() => this.callAsync('network.destroy', networkRef))
|
||||
if (pifId) {
|
||||
await this.call(
|
||||
'pool.create_VLAN_from_PIF',
|
||||
@@ -2226,7 +2081,7 @@ export default class Xapi extends XapiBase {
|
||||
await Promise.all(
|
||||
mapToArray(
|
||||
vlans,
|
||||
vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan)
|
||||
vlan => vlan !== NULL_REF && this.callAsync('VLAN.destroy', vlan)
|
||||
)
|
||||
)
|
||||
|
||||
@@ -2241,7 +2096,7 @@ export default class Xapi extends XapiBase {
|
||||
newPifs,
|
||||
pifRef =>
|
||||
!wasAttached[this.getObject(pifRef).host] &&
|
||||
this.call('PIF.unplug', pifRef)::ignoreErrors()
|
||||
this.callAsync('PIF.unplug', pifRef)::ignoreErrors()
|
||||
)
|
||||
)
|
||||
}
|
||||
@@ -2272,7 +2127,7 @@ export default class Xapi extends XapiBase {
|
||||
await Promise.all(
|
||||
mapToArray(
|
||||
vlans,
|
||||
vlan => vlan !== NULL_REF && this.call('VLAN.destroy', vlan)
|
||||
vlan => vlan !== NULL_REF && this.callAsync('VLAN.destroy', vlan)
|
||||
)
|
||||
)
|
||||
|
||||
@@ -2281,7 +2136,17 @@ export default class Xapi extends XapiBase {
mapToArray(bonds, bond => this.call('Bond.destroy', bond))
)

await this.call('network.destroy', network.$ref)
const tunnels = filter(this.objects.all, { $type: 'tunnel' })
await Promise.all(
map(pifs, async pif => {
const tunnel = find(tunnels, { access_PIF: pif.$ref })
if (tunnel != null) {
await this.callAsync('tunnel.destroy', tunnel.$ref)
}
})
)

await this.callAsync('network.destroy', network.$ref)
}
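// New behaviour: before destroying the network, any tunnel whose access PIF
// belongs to it is destroyed as well, presumably because dangling tunnels
// would otherwise block `network.destroy` or be leaked. Standalone restatement
// of the lookup, assuming the same `objects.all` cache used above:
const findTunnelByAccessPif = (xapi, pif) =>
  Object.values(xapi.objects.all).find(
    object => object.$type === 'tunnel' && object.access_PIF === pif.$ref
  )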
|
||||
|
||||
// =================================================================
|
||||
@@ -2481,7 +2346,7 @@ export default class Xapi extends XapiBase {
|
||||
)
|
||||
}
|
||||
|
||||
async _assertConsistentHostServerTime(hostRef) {
|
||||
async assertConsistentHostServerTime(hostRef) {
|
||||
const delta =
|
||||
parseDateTime(await this.call('host.get_servertime', hostRef)).getTime() -
|
||||
Date.now()
|
||||
@@ -2493,4 +2358,27 @@ export default class Xapi extends XapiBase {
)
}
}

async isHyperThreadingEnabled(hostId) {
try {
return (
(await this.call(
'host.call_plugin',
this.getObject(hostId).$ref,
'hyperthreading.py',
'get_hyperthreading',
{}
)) !== 'false'
)
} catch (error) {
if (
error.code === 'XENAPI_MISSING_PLUGIN' ||
error.code === 'UNKNOWN_XENAPI_PLUGIN_FUNCTION'
) {
return null
} else {
throw error
}
}
}
}
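// `null` is deliberately distinct from `false`: it means the hyperthreading.py
// plugin is not installed on the host, so the state is unknown. Hypothetical
// caller showing the three-way result:
async function describeHyperThreading(xapi, hostId) {
  const enabled = await xapi.isHyperThreadingEnabled(hostId)
  return enabled === null
    ? 'unknown (hyperthreading.py plugin not installed)'
    : enabled
    ? 'enabled'
    : 'disabled'
}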
|
||||
|
||||
@@ -68,18 +68,12 @@ declare export class Xapi {
|
||||
sr?: XapiObject,
|
||||
onVmCreation?: (XapiObject) => any
|
||||
): Promise<string>;
|
||||
_updateObjectMapProperty(
|
||||
object: XapiObject,
|
||||
property: string,
|
||||
entries: $Dict<null | string>
|
||||
): Promise<void>;
|
||||
_setObjectProperties(
|
||||
object: XapiObject,
|
||||
properties: $Dict<mixed>
|
||||
): Promise<void>;
|
||||
_snapshotVm(cancelToken: mixed, vm: Vm, nameLabel?: string): Promise<Vm>;
|
||||
|
||||
addTag(object: Id, tag: string): Promise<void>;
|
||||
barrier(): Promise<void>;
|
||||
barrier(ref: string): Promise<XapiObject>;
|
||||
deleteVm(vm: Id): Promise<void>;
|
||||
|
||||
@@ -4,13 +4,13 @@ import { makeEditObject } from '../utils'
|
||||
|
||||
export default {
|
||||
async _connectVif(vif) {
|
||||
await this.call('VIF.plug', vif.$ref)
|
||||
await this.callAsync('VIF.plug', vif.$ref)
|
||||
},
|
||||
async connectVif(vifId) {
|
||||
await this._connectVif(this.getObject(vifId))
|
||||
},
|
||||
async _deleteVif(vif) {
|
||||
await this.call('VIF.destroy', vif.$ref)
|
||||
await this.callAsync('VIF.destroy', vif.$ref)
|
||||
},
|
||||
async deleteVif(vifId) {
|
||||
const vif = this.getObject(vifId)
|
||||
@@ -20,7 +20,7 @@ export default {
|
||||
await this._deleteVif(vif)
|
||||
},
|
||||
async _disconnectVif(vif) {
|
||||
await this.call('VIF.unplug_force', vif.$ref)
|
||||
await this.callAsync('VIF.unplug_force', vif.$ref)
|
||||
},
|
||||
async disconnectVif(vifId) {
|
||||
await this._disconnectVif(this.getObject(vifId))
|
||||
@@ -37,7 +37,7 @@ export default {
|
||||
: 'locked'
|
||||
|
||||
if (lockingMode !== vif.locking_mode) {
|
||||
return this._set('locking_mode', lockingMode)
|
||||
return vif.set_locking_mode(lockingMode)
|
||||
}
|
||||
},
|
||||
],
|
||||
@@ -53,10 +53,36 @@ export default {
|
||||
: 'locked'
|
||||
|
||||
if (lockingMode !== vif.locking_mode) {
|
||||
return this._set('locking_mode', lockingMode)
|
||||
return vif.set_locking_mode(lockingMode)
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
// in kB/s
|
||||
rateLimit: {
|
||||
get: vif => {
|
||||
if (vif.qos_algorithm_type === 'ratelimit') {
|
||||
const { kbps } = vif.qos_algorithm_params
|
||||
if (kbps !== undefined) {
|
||||
return +kbps
|
||||
}
|
||||
}
|
||||
|
||||
// null is the value used to remove the existing value
|
||||
//
|
||||
// we need to match this, to allow avoiding the `set` if the value is
|
||||
// already missing.
|
||||
return null
|
||||
},
|
||||
set: (value, vif) =>
|
||||
Promise.all([
|
||||
vif.set_qos_algorithm_type(value === null ? '' : 'ratelimit'),
|
||||
vif.update_qos_algorithm_params(
|
||||
'kbps',
|
||||
value === null ? null : String(value)
|
||||
),
|
||||
]),
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -353,9 +353,10 @@ export default {
|
||||
if (JSON.parse(update).exit !== 0) {
|
||||
throw new Error('Update install failed')
|
||||
} else {
|
||||
await this._updateObjectMapProperty(host, 'other_config', {
|
||||
rpm_patch_installation_time: String(Date.now() / 1000),
|
||||
})
|
||||
await host.update_other_config(
|
||||
'rpm_patch_installation_time',
|
||||
String(Date.now() / 1000)
|
||||
)
|
||||
}
|
||||
})
|
||||
},
|
||||
|
||||
@@ -35,7 +35,7 @@ export default {
|
||||
},
|
||||
|
||||
_plugPbd(pbd) {
|
||||
return this.call('PBD.plug', pbd.$ref)
|
||||
return this.callAsync('PBD.plug', pbd.$ref)
|
||||
},
|
||||
|
||||
async plugPbd(id) {
|
||||
@@ -43,7 +43,7 @@ export default {
|
||||
},
|
||||
|
||||
_unplugPbd(pbd) {
|
||||
return this.call('PBD.unplug', pbd.$ref)
|
||||
return this.callAsync('PBD.unplug', pbd.$ref)
|
||||
},
|
||||
|
||||
async unplugPbd(id) {
|
||||
@@ -84,4 +84,32 @@ export default {
|
||||
})
|
||||
return unhealthyVdis
|
||||
},
|
||||
|
||||
async createSr({
|
||||
hostRef,
|
||||
|
||||
content_type = 'user', // recommended by Citrix
|
||||
device_config = {},
|
||||
name_description = '',
|
||||
name_label,
|
||||
shared = false,
|
||||
physical_size = 0,
|
||||
sm_config = {},
|
||||
type,
|
||||
}) {
|
||||
const srRef = await this.call(
|
||||
'SR.create',
|
||||
hostRef,
|
||||
device_config,
|
||||
physical_size,
|
||||
name_label,
|
||||
name_description,
|
||||
type,
|
||||
content_type,
|
||||
shared,
|
||||
sm_config
|
||||
)
|
||||
|
||||
return (await this.barrier(srRef)).uuid
|
||||
},
|
||||
}
|
||||
|
||||
@@ -52,6 +52,7 @@ export default {
|
||||
|
||||
coreOs = false,
|
||||
cloudConfig = undefined,
|
||||
networkConfig = undefined,
|
||||
|
||||
vgpuType = undefined,
|
||||
gpuGroup = undefined,
|
||||
@@ -93,7 +94,7 @@ export default {
|
||||
|
||||
// Creates the VDIs and executes the initial steps of the
|
||||
// installation.
|
||||
await this.call('VM.provision', vmRef)
|
||||
await this.callAsync('VM.provision', vmRef)
|
||||
|
||||
let vm = await this._getOrWaitObject(vmRef)
|
||||
|
||||
@@ -106,17 +107,12 @@ export default {
|
||||
|
||||
if (isHvm) {
|
||||
if (!isEmpty(vdis) || installMethod === 'network') {
|
||||
const { HVM_boot_params: bootParams } = vm
|
||||
let order = bootParams.order
|
||||
if (order) {
|
||||
order = 'n' + order.replace('n', '')
|
||||
} else {
|
||||
order = 'ncd'
|
||||
}
|
||||
const { order } = vm.HVM_boot_params
|
||||
|
||||
this._setObjectProperties(vm, {
|
||||
HVM_boot_params: { ...bootParams, order },
|
||||
})
|
||||
vm.update_HVM_boot_params(
|
||||
'order',
|
||||
order ? 'n' + order.replace('n', '') : 'ncd'
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// PV
|
||||
@@ -124,13 +120,12 @@ export default {
|
||||
if (installMethod === 'network') {
|
||||
// TODO: normalize RHEL URL?
|
||||
|
||||
await this._updateObjectMapProperty(vm, 'other_config', {
|
||||
'install-repository': installRepository,
|
||||
})
|
||||
await vm.update_other_config(
|
||||
'install-repository',
|
||||
installRepository
|
||||
)
|
||||
} else if (installMethod === 'cd') {
|
||||
await this._updateObjectMapProperty(vm, 'other_config', {
|
||||
'install-repository': 'cdrom',
|
||||
})
|
||||
await vm.update_other_config('install-repository', 'cdrom')
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -241,10 +236,16 @@ export default {
|
||||
}
|
||||
})
|
||||
|
||||
const method = coreOs
|
||||
? 'createCoreOsCloudInitConfigDrive'
|
||||
: 'createCloudInitConfigDrive'
|
||||
await this[method](vm.$id, srRef, cloudConfig)
|
||||
if (coreOs) {
|
||||
await this.createCoreOsCloudInitConfigDrive(vm.$id, srRef, cloudConfig)
|
||||
} else {
|
||||
await this.createCloudInitConfigDrive(
|
||||
vm.$id,
|
||||
srRef,
|
||||
cloudConfig,
|
||||
networkConfig
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// wait for the record with all the VBDs and VIFs
|
||||
@@ -257,25 +258,15 @@ export default {
|
||||
_editVm: makeEditObject({
|
||||
affinityHost: {
|
||||
get: 'affinity',
|
||||
set(value, vm) {
|
||||
return this._setObjectProperty(
|
||||
vm,
|
||||
'affinity',
|
||||
value ? this.getObject(value).$ref : NULL_REF
|
||||
)
|
||||
},
|
||||
set: (value, vm) =>
|
||||
vm.set_affinity(value ? this.getObject(value).$ref : NULL_REF),
|
||||
},
|
||||
|
||||
autoPoweron: {
|
||||
set(value, vm) {
|
||||
return Promise.all([
|
||||
this._updateObjectMapProperty(vm, 'other_config', {
|
||||
autoPoweron: value ? 'true' : null,
|
||||
}),
|
||||
value &&
|
||||
this.setPoolProperties({
|
||||
autoPoweron: true,
|
||||
}),
|
||||
vm.update_other_config('auto_poweron', value ? 'true' : null),
|
||||
value && vm.$pool.update_other_config('auto_poweron', 'true'),
|
||||
])
|
||||
},
|
||||
},
|
||||
@@ -285,23 +276,19 @@ export default {
|
||||
if (virtualizationMode !== 'pv' && virtualizationMode !== 'hvm') {
|
||||
throw new Error(`The virtualization mode must be 'pv' or 'hvm'`)
|
||||
}
|
||||
return this._set('domain_type', virtualizationMode)::pCatch(
|
||||
{ code: 'MESSAGE_METHOD_UNKNOWN' },
|
||||
() =>
|
||||
this._set(
|
||||
'HVM_boot_policy',
|
||||
return vm
|
||||
.set_domain_type(virtualizationMode)
|
||||
::pCatch({ code: 'MESSAGE_METHOD_UNKNOWN' }, () =>
|
||||
vm.set_HVM_boot_policy(
|
||||
virtualizationMode === 'hvm' ? 'Boot order' : ''
|
||||
)
|
||||
)
|
||||
)
|
||||
},
|
||||
},
|
||||
|
||||
coresPerSocket: {
|
||||
set(coresPerSocket, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'platform', {
|
||||
'cores-per-socket': coresPerSocket,
|
||||
})
|
||||
},
|
||||
set: (coresPerSocket, vm) =>
|
||||
vm.update_platform('cores-per-socket', String(coresPerSocket)),
|
||||
},
|
||||
|
||||
CPUs: 'cpus',
|
||||
@@ -319,26 +306,22 @@ export default {
|
||||
get: vm => +vm.VCPUs_at_startup,
|
||||
set: [
|
||||
'VCPUs_at_startup',
|
||||
function(value, vm) {
|
||||
return isVmRunning(vm) && this._set('VCPUs_number_live', value)
|
||||
},
|
||||
(value, vm) => isVmRunning(vm) && vm.set_VCPUs_number_live(value),
|
||||
],
|
||||
},
|
||||
|
||||
cpuCap: {
|
||||
get: vm => vm.VCPUs_params.cap && +vm.VCPUs_params.cap,
|
||||
set(cap, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'VCPUs_params', { cap })
|
||||
},
|
||||
set: (cap, vm) => vm.update_VCPUs_params('cap', String(cap)),
|
||||
},
|
||||
|
||||
cpuMask: {
|
||||
get: vm => vm.VCPUs_params.mask && vm.VCPUs_params.mask.split(','),
|
||||
set(cpuMask, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'VCPUs_params', {
|
||||
mask: cpuMask == null ? cpuMask : cpuMask.join(','),
|
||||
})
|
||||
},
|
||||
set: (cpuMask, vm) =>
|
||||
vm.update_VCPUs_params(
|
||||
'mask',
|
||||
cpuMask == null ? cpuMask : cpuMask.join(',')
|
||||
),
|
||||
},
|
||||
|
||||
cpusMax: 'cpusStaticMax',
|
||||
@@ -352,15 +335,15 @@ export default {
|
||||
|
||||
cpuWeight: {
|
||||
get: vm => vm.VCPUs_params.weight && +vm.VCPUs_params.weight,
|
||||
set(weight, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'VCPUs_params', { weight })
|
||||
},
|
||||
set: (weight, vm) =>
|
||||
vm.update_VCPUs_params(
|
||||
'weight',
|
||||
weight === null ? null : String(weight)
|
||||
),
|
||||
},
|
||||
|
||||
highAvailability: {
|
||||
set(ha, vm) {
|
||||
return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
|
||||
},
|
||||
set: (ha, vm) => vm.set_ha_restart_priority(ha),
|
||||
},
|
||||
|
||||
memoryMin: {
|
||||
@@ -432,19 +415,12 @@ export default {
|
||||
hasVendorDevice: true,
|
||||
|
||||
expNestedHvm: {
|
||||
set(expNestedHvm, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'platform', {
|
||||
'exp-nested-hvm': expNestedHvm ? 'true' : null,
|
||||
})
|
||||
},
|
||||
set: (expNestedHvm, vm) =>
|
||||
vm.update_platform('exp-nested-hvm', expNestedHvm ? 'true' : null),
|
||||
},
|
||||
|
||||
nicType: {
|
||||
set(nicType, vm) {
|
||||
return this._updateObjectMapProperty(vm, 'platform', {
|
||||
nic_type: nicType,
|
||||
})
|
||||
},
|
||||
set: (nicType, vm) => vm.update_platform('nic_type', nicType),
|
||||
},
|
||||
|
||||
vga: {
|
||||
@@ -454,7 +430,7 @@ export default {
|
||||
`The different values that the VGA can take are: ${XEN_VGA_VALUES}`
|
||||
)
|
||||
}
|
||||
return this._updateObjectMapProperty(vm, 'platform', { vga })
|
||||
return vm.update_platform('vga', vga)
|
||||
},
|
||||
},
|
||||
|
||||
@@ -465,15 +441,17 @@ export default {
|
||||
`The different values that the video RAM can take are: ${XEN_VIDEORAM_VALUES}`
|
||||
)
|
||||
}
|
||||
return this._updateObjectMapProperty(vm, 'platform', { videoram })
|
||||
return vm.update_platform('videoram', String(videoram))
|
||||
},
|
||||
},
|
||||
|
||||
startDelay: {
|
||||
get: vm => +vm.start_delay,
|
||||
set(startDelay, vm) {
|
||||
return this.call('VM.set_start_delay', vm.$ref, startDelay)
|
||||
},
|
||||
set: (startDelay, vm) => vm.set_start_delay(startDelay),
|
||||
},
|
||||
|
||||
hvmBootFirmware: {
|
||||
set: (firmware, vm) => vm.update_HVM_boot_params('firmware', firmware),
|
||||
},
|
||||
}),
|
||||
|
||||
@@ -486,7 +464,7 @@ export default {
|
||||
if (snapshotBefore) {
|
||||
await this._snapshotVm(snapshot.$snapshot_of)
|
||||
}
|
||||
await this.call('VM.revert', snapshot.$ref)
|
||||
await this.callAsync('VM.revert', snapshot.$ref)
|
||||
if (snapshot.snapshot_info['power-state-at-snapshot'] === 'Running') {
|
||||
const vm = await this.barrier(snapshot.snapshot_of)
|
||||
if (vm.power_state === 'Halted') {
|
||||
@@ -499,15 +477,22 @@ export default {
|
||||
|
||||
async resumeVm(vmId) {
|
||||
// the force parameter is always true
|
||||
return this.call('VM.resume', this.getObject(vmId).$ref, false, true)
|
||||
await this.callAsync('VM.resume', this.getObject(vmId).$ref, false, true)
|
||||
},
|
||||
|
||||
async unpauseVm(vmId) {
|
||||
return this.call('VM.unpause', this.getObject(vmId).$ref)
|
||||
await this.callAsync('VM.unpause', this.getObject(vmId).$ref)
|
||||
},
|
||||
|
||||
rebootVm(vmId, { hard = false } = {}) {
|
||||
return this.callAsync(
|
||||
`VM.${hard ? 'hard' : 'clean'}_reboot`,
|
||||
this.getObject(vmId).$ref
|
||||
).then(noop)
|
||||
},
|
||||
|
||||
shutdownVm(vmId, { hard = false } = {}) {
|
||||
return this.call(
|
||||
return this.callAsync(
|
||||
`VM.${hard ? 'hard' : 'clean'}_shutdown`,
|
||||
this.getObject(vmId).$ref
|
||||
).then(noop)
|
||||
|
||||
@@ -148,8 +148,8 @@ export const makeEditObject = specs => {

if (set === true) {
const prop = camelToSnakeCase(name)
return function(value) {
return this._set(prop, value)
return function(value, obj) {
return this.setField(obj.$type, obj.$ref, prop, value)
}
}

@@ -157,16 +157,22 @@ export const makeEditObject = specs => {
const index = set.indexOf('.')
if (index === -1) {
const prop = camelToSnakeCase(set)
return function(value) {
return this._set(prop, value)
return function(value, obj) {
return this.setField(obj.$type, obj.$ref, prop, value)
}
}

const map = set.slice(0, index)
const prop = set.slice(index + 1)
const field = set.slice(0, index)
const entry = set.slice(index + 1)

return function(value, object) {
return this._updateObjectMapProperty(object, map, { [prop]: value })
return this.setFieldEntry(
object.$type,
object.$ref,
field,
entry,
value
)
}
}
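// After this change a plain spec such as `set: 'nameLabel'` resolves to
// `setField(obj.$type, obj.$ref, 'name_label', value)` and a dotted spec such
// as `set: 'other_config.foo'` resolves to
// `setFieldEntry(obj.$type, obj.$ref, 'other_config', 'foo', value)`. Both
// helpers are assumed to be provided by the xen-api client; a hedged sketch of
// the map-entry case, mirroring the remove-then-add semantics used elsewhere
// in this diff:
async function setFieldEntrySketch(xapi, type, ref, field, entry, value) {
  if (value === null) {
    // null removes the entry
    return xapi.call(`${type}.remove_from_${field}`, ref, entry)
  }
  // remove first (ignoring failures) so add_to does not reject on duplicates
  await xapi.call(`${type}.remove_from_${field}`, ref, entry).catch(() => {})
  return xapi.call(`${type}.add_to_${field}`, ref, entry, value)
}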
|
||||
|
||||
@@ -249,16 +255,6 @@ export const makeEditObject = specs => {
|
||||
const limits = checkLimits && {}
|
||||
const object = this.getObject(id)
|
||||
|
||||
const _objectRef = object.$ref
|
||||
const _setMethodPrefix = `${object.$type}.set_`
|
||||
|
||||
// Context used to execute functions.
|
||||
const context = {
|
||||
__proto__: this,
|
||||
_set: (prop, value) =>
|
||||
this.call(_setMethodPrefix + prop, _objectRef, prepareXapiParam(value)),
|
||||
}
|
||||
|
||||
const set = (value, name) => {
|
||||
if (value === undefined) {
|
||||
return
|
||||
@@ -287,7 +283,7 @@ export const makeEditObject = specs => {
|
||||
}
|
||||
}
|
||||
|
||||
const cb = () => spec.set.call(context, value, object)
|
||||
const cb = () => spec.set.call(this, value, object)
|
||||
|
||||
const { constraints } = spec
|
||||
if (constraints) {
|
||||
|
||||
@@ -60,8 +60,9 @@ function checkParams(method, params) {

const result = schemaInspector.validate(
{
type: 'object',
properties: schema,
strict: true,
type: 'object',
},
params
)
@@ -261,11 +262,15 @@ export default class Api {
//
// The goal here is to standardize the calls by always providing
// an id parameter when possible to simplify calls to the API.
if (params != null && params.id === undefined) {
if (params?.id === undefined) {
const namespace = name.slice(0, name.indexOf('.'))
const id = params[namespace]
if (typeof id === 'string') {
params.id = id
const spec = method.params
if (spec !== undefined && 'id' in spec && !(namespace in spec)) {
const id = params[namespace]
if (typeof id === 'string') {
delete params[namespace]
params.id = id
}
}
}
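// The aliasing is now opt-in: `{ vm: <id> }` only becomes `{ id: <id> }` when
// the method schema actually declares an `id` parameter and does not declare
// the namespace one, and the original key is removed instead of duplicated.
// Same logic extracted into a pure helper for clarity (the null guard is kept
// from the previous version of the condition):
function normalizeIdParam(name, method, params) {
  if (params == null || params.id !== undefined) {
    return params
  }
  const namespace = name.slice(0, name.indexOf('.'))
  const spec = method.params
  if (spec !== undefined && 'id' in spec && !(namespace in spec)) {
    const id = params[namespace]
    if (typeof id === 'string') {
      delete params[namespace]
      params.id = id
    }
  }
  return params
}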
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
import createLogger from '@xen-orchestra/log'
import { createPredicate } from 'value-matcher'
import { ignoreErrors } from 'promise-toolbox'
import { invalidCredentials, noSuchObject } from 'xo-common/api-errors'

@@ -193,6 +194,14 @@ export default class {
}
}

async deleteAuthenticationTokens({ filter }) {
return Promise.all(
(await this._tokens.get())
.filter(createPredicate(filter))
.map(({ id }) => this.deleteAuthenticationToken(id))
)
}
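// `createPredicate` (value-matcher) compiles the declarative `filter` into a
// test function, so tokens can be pruned in bulk. Hypothetical usage; the
// `user_id` field on stored tokens is an assumption:
async function revokeTokensOfUser(app, userId) {
  await app.deleteAuthenticationTokens({
    filter: { user_id: userId },
  })
}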

async getAuthenticationToken(id) {
let token = await this._tokens.first(id)
if (token === undefined) {