Compare commits
105 Commits
xo-server-
...
xo-server-
| Author | SHA1 | Date |
| --- | --- | --- |
|  | abb65325b5 |  |
|  | 5757afa1d8 |  |
|  | 86e9b9c1b8 |  |
|  | 1cdd1fa00e |  |
|  | 9d12759c68 |  |
|  | 594341fab6 |  |
|  | 4e88125cbe |  |
|  | 13237180a2 |  |
|  | f64d7e0b6e |  |
|  | 040a6930a4 |  |
|  | c54b9189a6 |  |
|  | 8882f1b019 |  |
|  | ae6416c4d2 |  |
|  | 8faed87656 |  |
|  | 0983f05969 |  |
|  | d43e2544a1 |  |
|  | ca83d11ac8 |  |
|  | 1cdcdd9b5f |  |
|  | cc7806e35b |  |
|  | 0ee48b6623 |  |
|  | 8c02e0efbd |  |
|  | 34d3ca82bc |  |
|  | 43822d3667 |  |
|  | f4ac73b3b4 |  |
|  | f084b6def9 |  |
|  | a00d101ff7 |  |
|  | 9d5900d9b6 |  |
|  | 28fb4e8216 |  |
|  | bec4dbe652 |  |
|  | 72cc14f508 |  |
|  | d20941cc2c |  |
|  | 9cb8a05316 |  |
|  | dccd799f6d |  |
|  | b42b3d1b01 |  |
|  | a40d6f772a |  |
|  | 6e9bfd18d9 |  |
|  | 3b92dd0139 |  |
|  | 564d53610a |  |
|  | b4c7b8ac7f |  |
|  | 7acd90307b |  |
|  | d3ec76c19f |  |
|  | 688cb20674 |  |
|  | c63be20bea |  |
|  | df36633223 |  |
|  | 3597621d88 |  |
|  | 8387684839 |  |
|  | f261f395f1 |  |
|  | f27170ff0e |  |
|  | d82c951db6 |  |
|  | 41ca853e03 |  |
|  | a08d098265 |  |
|  | 875681b8ce |  |
|  | a03dcbbf55 |  |
|  | 97cabbbc69 |  |
|  | 13725a9e21 |  |
|  | f47df961f7 |  |
|  | 2f644d5eeb |  |
|  | 4b292bb78c |  |
|  | 804891cc81 |  |
|  | d335e06371 |  |
|  | 477058ad23 |  |
|  | eb3b68401d |  |
|  | 865d2df124 |  |
|  | 88160bae1d |  |
|  | f581e93b88 |  |
|  | 21a7cf7158 |  |
|  | 5edee4bae0 |  |
|  | 916ca5576a |  |
|  | 6c861bfd1f |  |
|  | 56961b55bd |  |
|  | cdcd7154ba |  |
|  | 654a2ee870 |  |
|  | 903634073a |  |
|  | 0d4818feb6 |  |
|  | d6aa40679b |  |
|  | b7cc31c94d |  |
|  | 6860156b6f |  |
|  | 29486c9ce2 |  |
|  | 7cfa6a5da4 |  |
|  | 2563be472b |  |
|  | 7289e856d9 |  |
|  | 975de1954e |  |
|  | 95bcf0c080 |  |
|  | f900a5ef4f |  |
|  | 7f1ab529ae |  |
|  | 49fc86e4b1 |  |
|  | 924aef84f1 |  |
|  | 96e6e2b72a |  |
|  | 71997d4e65 |  |
|  | 447f2f9506 |  |
|  | 79aef9024b |  |
|  | fdf6f4fdf3 |  |
|  | 4d1eaaaade |  |
|  | bdad6c0f6d |  |
|  | ff1ca5d933 |  |
|  | 2cf4c494a4 |  |
|  | 95ac0a861a |  |
|  | 746c301f39 |  |
|  | 6455b12b58 |  |
|  | 485b8fe993 |  |
|  | d7527f280c |  |
|  | d57fa4375d |  |
|  | d9e42c6625 |  |
|  | 28293d3fce |  |
|  | d505401446 |  |
```
@@ -1,5 +1,7 @@
module.exports = {
  extends: [
    'plugin:eslint-comments/recommended',

    'standard',
    'standard-jsx',
    'prettier',

@@ -19,7 +21,7 @@ module.exports = {

  overrides: [
    {
      files: ['packages/*cli*/**/*.js', '*-cli.js'],
      files: ['cli.js', '*-cli.js', 'packages/*cli*/**/*.js'],
      rules: {
        'no-console': 'off',
      },
```
```
@@ -46,6 +46,7 @@
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepare": "yarn run build",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -16,6 +16,9 @@
  },
  "dependencies": {
    "golike-defer": "^0.4.1",
    "xen-api": "^0.24.6"
    "xen-api": "^0.25.1"
  },
  "scripts": {
    "postversion": "npm publish"
  }
}
```
```
@@ -55,6 +55,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -43,6 +43,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -42,6 +42,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -32,7 +32,7 @@
    "promise-toolbox": "^0.12.1",
    "readable-stream": "^3.0.6",
    "through2": "^3.0.0",
    "tmp": "^0.0.33",
    "tmp": "^0.1.0",
    "xo-remote-parser": "^0.5.0"
  },
  "devDependencies": {

@@ -55,6 +55,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -410,6 +410,18 @@ export default class RemoteHandlerAbstract {
    await this._unlink(file).catch(ignoreEnoent)
  }

  async write(
    file: File,
    buffer: Buffer,
    position: number
  ): Promise<{| bytesWritten: number, buffer: Buffer |}> {
    await this._write(
      typeof file === 'string' ? normalizePath(file) : file,
      buffer,
      position
    )
  }

  async writeFile(
    file: string,
    data: Data,

@@ -546,6 +558,28 @@ export default class RemoteHandlerAbstract {
    throw new Error('Not implemented')
  }

  async _write(file: File, buffer: Buffer, position: number): Promise<void> {
    const isPath = typeof file === 'string'
    if (isPath) {
      file = await this.openFile(file, 'r+')
    }
    try {
      return await this._writeFd(file, buffer, position)
    } finally {
      if (isPath) {
        await this.closeFile(file)
      }
    }
  }

  async _writeFd(
    fd: FileDescriptor,
    buffer: Buffer,
    position: number
  ): Promise<void> {
    throw new Error('Not implemented')
  }

  async _writeFile(
    file: string,
    data: Data,
```
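The new `write()` helper accepts either a path or an already-opened `FileDescriptor`. The usage sketch below is not part of the changeset: the remote URL and file names are made up, and the surrounding calls (`getHandler`, `sync`, `outputFile`, `openFile`, `closeFile`, `forget`) are assumed to follow the package's existing API; it only illustrates the two call forms.

```js
// Hypothetical usage sketch, names and URL are examples, not from the diff.
import { getHandler } from '@xen-orchestra/fs'

async function main() {
  const handler = getHandler({ url: 'file:///tmp/remote' }) // example local remote
  await handler.sync()

  // create a file to patch, then overwrite part of it
  await handler.outputFile('data.bin', Buffer.alloc(4096))
  const patch = Buffer.from('patched bytes')

  // Path form: _write() opens the file with flag 'r+', delegates to _writeFd(), then closes it.
  await handler.write('data.bin', patch, 0)

  // FileDescriptor form: the caller keeps control of open/close, _writeFd() is used directly.
  const fd = await handler.openFile('data.bin', 'r+')
  try {
    await handler.write(fd, patch, 1024)
  } finally {
    await handler.closeFile(fd)
  }

  await handler.forget()
}

main().catch(console.error)
```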
```
@@ -3,9 +3,9 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import getStream from 'get-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { random } from 'lodash'
import { tmpdir } from 'os'

import { getHandler } from '.'

@@ -310,5 +310,45 @@ handlers.forEach(url => {
      await handler.unlink('file')
    })
  })

  describe('#write()', () => {
    beforeEach(() => handler.outputFile('file', TEST_DATA))

    const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
    const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)

    forOwn(
      {
        'dont increase file size': (() => {
          const offset = random(0, TEST_DATA_LEN - PATCH_DATA_LEN)

          const expected = Buffer.from(TEST_DATA)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
        'increase file size': (() => {
          const offset = random(
            TEST_DATA_LEN - PATCH_DATA_LEN + 1,
            TEST_DATA_LEN
          )

          const expected = Buffer.alloc(offset + PATCH_DATA_LEN)
          TEST_DATA.copy(expected)
          PATCH_DATA.copy(expected, offset)

          return { offset, expected }
        })(),
      },
      ({ offset, expected }, title) => {
        describe(title, () => {
          testWithFileDescriptor('file', 'r+', async ({ file }) => {
            await handler.write(file, PATCH_DATA, offset)
            await expect(await handler.readFile('file')).toEqual(expected)
          })
        })
      }
    )
  })
})
})
```
```
@@ -110,6 +110,10 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    return fs.unlink(this._getFilePath(file))
  }

  _writeFd(file, buffer, position) {
    return fs.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, { flags }) {
    return fs.writeFile(this._getFilePath(file), data, { flag: flags })
  }

@@ -159,6 +159,10 @@ export default class SmbHandler extends RemoteHandlerAbstract {
    return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
  }

  _writeFd(file, buffer, position) {
    return this._client.write(file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, options) {
    return this._client
      .writeFile(this._getFilePath(file), data, options)
```
```
@@ -27,7 +27,7 @@
    ">2%"
  ],
  "engines": {
    "node": ">=4"
    "node": ">=6"
  },
  "dependencies": {
    "lodash": "^4.17.4",

@@ -48,6 +48,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}

@@ -45,6 +45,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
@xen-orchestra/multi-counter/index.js (new file, 8 lines)

```js
const handler = {
  get(target, property) {
    const value = target[property]
    return value !== undefined ? value : 0
  },
}

export const create = () => new Proxy({ __proto__: null }, handler)
```
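A brief usage sketch (not part of the commit) of the counter created above; the key names are arbitrary examples. Because the Proxy's `get` trap returns 0 for missing properties, callers can increment keys without initializing them:

```js
import { create } from '@xen-orchestra/multi-counter'

const counter = create()

console.log(counter.vms) // 0: missing keys default to 0 via the get trap
counter.vms += 1
counter.vms += 1
counter.hosts += 1
console.log(counter.vms, counter.hosts) // 2 1
```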
@xen-orchestra/multi-counter/package.json (new file, 14 lines)

```json
{
  "name": "@xen-orchestra/multi-counter",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/multi-counter",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@xen-orchestra/multi-counter",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.0.0",
  "scripts": {
    "postversion": "npm publish"
  }
}
```
CHANGELOG.md (87 changed lines)

@@ -1,6 +1,58 @@

# ChangeLog

## Next (2019-03-19)
## **5.34.0** (2019-04-30)

### Highlights

- [Self/New VM] Add network config box to custom cloud-init [#3872](https://github.com/vatesfr/xen-orchestra/issues/3872) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4150))
- [Metadata backup] Detailed logs [#4005](https://github.com/vatesfr/xen-orchestra/issues/4005) (PR [#4014](https://github.com/vatesfr/xen-orchestra/pull/4014))
- [Backup reports] Support metadata backups (PR [#4084](https://github.com/vatesfr/xen-orchestra/pull/4084))
- [VM migration] Auto select default SR and collapse optional actions [#3326](https://github.com/vatesfr/xen-orchestra/issues/3326) (PR [#4121](https://github.com/vatesfr/xen-orchestra/pull/4121))
- Unlock basic stats on all editions [#4166](https://github.com/vatesfr/xen-orchestra/issues/4166) (PR [#4172](https://github.com/vatesfr/xen-orchestra/pull/4172))

### Enhancements

- [Settings/remotes] Expose mount options field for SMB [#4063](https://github.com/vatesfr/xen-orchestra/issues/4063) (PR [#4067](https://github.com/vatesfr/xen-orchestra/pull/4067))
- [Backup/Schedule] Add warning regarding DST when you add a schedule [#4042](https://github.com/vatesfr/xen-orchestra/issues/4042) (PR [#4056](https://github.com/vatesfr/xen-orchestra/pull/4056))
- [Import] Avoid blocking the UI when dropping a big OVA file on the UI (PR [#4018](https://github.com/vatesfr/xen-orchestra/pull/4018))
- [Backup NG/Overview] Make backup list title clearer [#4111](https://github.com/vatesfr/xen-orchestra/issues/4111) (PR [#4129](https://github.com/vatesfr/xen-orchestra/pull/4129))
- [Dashboard] Hide "Report" section for non-admins [#4123](https://github.com/vatesfr/xen-orchestra/issues/4123) (PR [#4126](https://github.com/vatesfr/xen-orchestra/pull/4126))
- [Self/New VM] Display confirmation modal when user will use a large amount of resources [#4044](https://github.com/vatesfr/xen-orchestra/issues/4044) (PR [#4127](https://github.com/vatesfr/xen-orchestra/pull/4127))
- [VDI migration, New disk] Warning when SR host is different from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4035](https://github.com/vatesfr/xen-orchestra/pull/4035))
- [Attach disk] Display warning message when VDI SR is on different host from the other disks [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#4117](https://github.com/vatesfr/xen-orchestra/pull/4117))
- [Editable] Notify user when editable undo fails [#3799](https://github.com/vatesfr/xen-orchestra/issues/3799) (PR [#4150](https://github.com/vatesfr/xen-orchestra/pull/4157))
- [XO] Add banner for sources users to clarify support conditions [#4165](https://github.com/vatesfr/xen-orchestra/issues/4165) (PR [#4167](https://github.com/vatesfr/xen-orchestra/pull/4167))

### Bug fixes

- [Continuous Replication] Fix VHD size guess for empty files [#4105](https://github.com/vatesfr/xen-orchestra/issues/4105) (PR [#4107](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Backup NG] Only display full backup interval in case of a delta backup (PR [#4125](https://github.com/vatesfr/xen-orchestra/pull/4107))
- [Dashboard/Health] fix 'an error has occurred' on the storage state table [#4128](https://github.com/vatesfr/xen-orchestra/issues/4128) (PR [#4132](https://github.com/vatesfr/xen-orchestra/pull/4132))
- [Menu] XOA: Fixed empty slot when menu is collapsed [#4012](https://github.com/vatesfr/xen-orchestra/issues/4012) (PR [#4068](https://github.com/vatesfr/xen-orchestra/pull/4068)
- [Self/New VM] Fix missing templates when refreshing page [#3265](https://github.com/vatesfr/xen-orchestra/issues/3265) (PR [#3565](https://github.com/vatesfr/xen-orchestra/pull/3565))
- [Home] No more false positives when select Tag on Home page [#4087](https://github.com/vatesfr/xen-orchestra/issues/4087) (PR [#4112](https://github.com/vatesfr/xen-orchestra/pull/4112))

### Released packages

- xo-server-backup-reports v0.16.0
- complex-matcher v0.6.0
- xo-vmdk-to-vhd v0.1.7
- vhd-lib v0.6.1
- xo-server v5.40.0
- xo-web v5.40.1

## **5.33.1** (2019-04-04)

### Bug fix

- Fix major memory leak [2563be4](https://github.com/vatesfr/xen-orchestra/commit/2563be472bfd84c6ed867efd21c4aeeb824d387f)

### Released packages

- xen-api v0.25.1
- xo-server v5.38.2

## **5.33.0** (2019-03-29)

### Enhancements

@@ -13,6 +65,20 @@

- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
- Improve connection to XCP-ng/XenServer hosts:
  - never disconnect by itself even in case of errors
  - never stop watching events

### Bug fixes

@@ -22,6 +88,25 @@

- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
- [Remotes] Fixes `spawn mount EMFILE` error during backup
- Properly redirect to sign in page instead of being stuck in a refresh loop
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
  - Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
  - Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
  - Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
  - XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)

### Released packages

- xen-api v0.25.0
- vhd-lib v0.6.0
- @xen-orchestra/fs v0.8.0
- xo-server-usage-report v0.7.2
- xo-server v5.38.1
- xo-web v5.38.0

## **5.32.2** (2019-02-28)

@@ -2,36 +2,16 @@

### Enhancements

- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))

### Bug fixes

- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
- [Remotes] Fixes `spawn mount EMFILE` error during backup
- Properly redirect to sign in page instead of being stuck in a refresh loop
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
  - Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
  - Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
  - Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
  - XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
- [Pool/Patches] Fix "an error has occurred" in "Applied patches" [#4192](https://github.com/vatesfr/xen-orchestra/issues/4192) (PR [#4193](https://github.com/vatesfr/xen-orchestra/pull/4193))
- [Backup NG] Fix report sent even though "Never" is selected [#4092](https://github.com/vatesfr/xen-orchestra/issues/4092) (PR [#4178](https://github.com/vatesfr/xen-orchestra/pull/4178))
- [Remotes] Fix issues after a config import (PR [#4197](https://github.com/vatesfr/xen-orchestra/pull/4197))

### Released packages

- xen-api v0.24.6
- vhd-lib v0.6.0
- @xen-orchestra/fs v0.8.0
- xo-server-usage-report v0.7.2
- xo-server v5.38.0
- xo-web v5.38.0
- xo-server-backup-reports v0.16.1
- @xen-orchestra/fs v0.9.0
- vhd-lib v0.7.0
- xo-server v5.41.0
- xo-web v5.41.0
@@ -1,4 +1,4 @@

# Xen Orchestra [![](https://xen-orchestra.com/blog/content/images/2015/05/xo_crisp.png)](https://go.crisp.im/chat/embed/?website_id=-JzqzzwddSV7bKGtEyAQ) [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)
# Xen Orchestra [![Build Status](https://travis-ci.org/vatesfr/xen-orchestra.png?branch=master)](https://travis-ci.org/vatesfr/xen-orchestra)

![](https://xen-orchestra.com/blog/content/images/2017/04/xologo.png)
New binary files:

- docs/assets/metadata-1.png (9.4 KiB)
- docs/assets/metadata-2.png (71 KiB)
- docs/assets/metadata-3.png (22 KiB)
- docs/assets/metadata-4.png (48 KiB)
- docs/assets/metadata-5.png (55 KiB)
- docs/assets/metadata-6.png (57 KiB)
- docs/assets/metadata-7.png (11 KiB)
@@ -1,13 +1,13 @@

# Installation

SSH to your XenServer and execute the following:
SSH to your XenServer/XCP-ng host and execute the following:

```
bash -c "$(curl -s http://xoa.io/deploy)"
```

This will automatically download/import/start the XOA appliance. Nothing is changed on your XenServer host itself, it's 100% safe.
This will automatically download/import/start the XOA appliance. Nothing is changed on your host itself, it's 100% safe.

## [More on XOA](xoa.md)
@@ -1,6 +1,6 @@

# Metadata backup

> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
> WARNING: Metadata backup is an experimental feature. Unexpected issues are possible, but unlikely.

## Introduction

@@ -11,21 +11,38 @@ In Xen Orchestra, Metadata backup is divided into two different options:

* Pool metadata backup
* XO configuration backup

### How to use metadata backup
### Performing a backup

In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
![](./assets/metadata1.png)
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata:
![](./assets/metadata-1.png)

When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):

![](./assets/metadata2.png)
![](./assets/metadata-2.png)

Define the name and retention for the job.

![](./assets/metadata3.png)
![](./assets/metadata-3.png)

Once created, the job is displayed with the other classic jobs.

![](./assets/metadata4.png)
![](./assets/metadata-4.png)

> Restore for metadata backup jobs should be available in XO 5.33

### Performing a restore

> WARNING: restoring pool metadata completely overwrites the XAPI database of a host. Only perform a metadata restore if it is a new server with nothing running on it (eg replacing a host with new hardware).

If you browse to the Backup NG Restore panel, you will now notice a Metadata filter button:

![](./assets/metadata-5.png)

If you click this button, it will show you Metadata backups available for restore:

![](./assets/metadata-6.png)

You can see both our Xen Orchestra config backup, and our pool metadata backup. To restore one, simply click the blue restore arrow, choose a backup date to restore, and click OK:

![](./assets/metadata-7.png)

That's it!
@@ -1,24 +1,33 @@

# Support

You can access our pro support if you subscribe to any of these plans:
Xen Orchestra will run in a controlled/tested environment thanks to XOA ([Xen Orchestra virtual Appliance](https://xen-orchestra.com/#!/xoa)). **This is the way to get pro support**. Any account with a registered XOA can access a [dedicated support panel](https://xen-orchestra.com/#!/member/support).

XOA is available in multiple plans:

* Free
* Starter
* Enterprise
* Premium

The better the plan, the faster the support will be with higher priority.
Higher tier support plans include faster ticket response times (and cover more features). Paid support plans and response times are based on the plan you have, plans can be [reviewed here](https://xen-orchestra.com/#!/xo-pricing).

## XOA Free support

With the free version of the Xen Orchestra Appliance (XOA free), you can open support tickets and we will do our best to assist you, however, this support is limited and is not guaranteed in regards to response times or resolutions offered.

## Community support

If you are using Xen Orchestra via the sources, you can ask questions and try to recieve help two different ways:
If you are using Xen Orchestra via the source and not XOA, you can ask questions and try to recieve help through a number of different ways:

* In our [forum](https://xen-orchestra.com/forum/)
* In our [forum](https://xcp-ng.org/forum/category/12/xen-orchestra)
* In our IRC - `#xen-orchestra` on `Freenode`

However, there's no guarantee you will receive an answer and no guaranteed response time. If you are using XO from sources, we encourage you to give back to the community by assisting other users via these two avenues as well.
We encourage you to give back to the community by assisting other users via these two avenues as well.

If you are using Xen Orchestra in production, please subscribe to a plan.
Lastly while Xen Orchestra is free and Open Source software, supporting and developing it takes a lot of effort. If you are considering using Xen Orchestra in production, please subscribe for one of our [professional support plans](https://xen-orchestra.com/#!/xo-pricing).

> Note: support from the sources is harder, because Xen Orchestra can potentially run on any Linux distro (or even FreeBSD and Windows!). Always try to double check that you followed our guide on how to [install it from the sources](https://xen-orchestra.com/docs/from_the_sources.html) before going further.

## Open a ticket

If you have a subscription, you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
If you have a subscription (or at least a registered free XOA), you can open a ticket describing your issue directly from your personal account page [here](https://xen-orchestra.com/#!/member/support)
```
@@ -10,15 +10,16 @@
    "eslint-config-prettier": "^4.1.0",
    "eslint-config-standard": "12.0.0",
    "eslint-config-standard-jsx": "^6.0.2",
    "eslint-plugin-eslint-comments": "^3.1.1",
    "eslint-plugin-import": "^2.8.0",
    "eslint-plugin-node": "^8.0.0",
    "eslint-plugin-promise": "^4.0.0",
    "eslint-plugin-react": "^7.6.1",
    "eslint-plugin-standard": "^4.0.0",
    "exec-promise": "^0.7.0",
    "flow-bin": "^0.95.1",
    "flow-bin": "^0.98.0",
    "globby": "^9.0.0",
    "husky": "^1.2.1",
    "husky": "^2.2.0",
    "jest": "^24.1.0",
    "lodash": "^4.17.4",
    "prettier": "^1.10.2",
```
```
@@ -1,6 +1,6 @@
{
  "name": "complex-matcher",
  "version": "0.5.0",
  "version": "0.6.0",
  "license": "ISC",
  "description": "",
  "keywords": [],

@@ -25,7 +25,7 @@
    ">2%"
  ],
  "engines": {
    "node": ">=4"
    "node": ">=6"
  },
  "dependencies": {
    "lodash": "^4.17.4"

@@ -44,6 +44,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -599,6 +599,13 @@ export const parse = parser.parse.bind(parser)

// -------------------------------------------------------------------

const _extractStringFromRegexp = child => {
  const unescapedRegexp = child.re.source.replace(/^(\^)|\\|\$$/g, '')
  if (child.re.source === `^${escapeRegExp(unescapedRegexp)}$`) {
    return unescapedRegexp
  }
}

const _getPropertyClauseStrings = ({ child }) => {
  if (child instanceof Or) {
    const strings = []

@@ -606,6 +613,12 @@ const _getPropertyClauseStrings = ({ child }) => {
      if (child instanceof StringNode) {
        strings.push(child.value)
      }
      if (child instanceof RegExpNode) {
        const unescapedRegexp = _extractStringFromRegexp(child)
        if (unescapedRegexp !== undefined) {
          strings.push(unescapedRegexp)
        }
      }
    })
    return strings
  }

@@ -613,6 +626,12 @@ const _getPropertyClauseStrings = ({ child }) => {
  if (child instanceof StringNode) {
    return [child.value]
  }
  if (child instanceof RegExpNode) {
    const unescapedRegexp = _extractStringFromRegexp(child)
    if (unescapedRegexp !== undefined) {
      return [unescapedRegexp]
    }
  }

  return []
}

@@ -12,10 +12,13 @@ import {
} from './'

it('getPropertyClausesStrings', () => {
  const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar)'))
  const tmp = getPropertyClausesStrings(
    parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/')
  )
  expect(tmp).toEqual({
    bar: ['baz'],
    baz: ['foo', 'bar'],
    baz: ['foo', 'bar', 'boo', 'far'],
    foo: ['bar'],
  })
})
```
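The helper above treats an anchored, fully escaped regular expression such as `/^boo$/` as equivalent to the plain string `boo`, which is why such patterns can now be reported by `getPropertyClausesStrings`. A standalone sketch of that idea (not the library code, just the same check using lodash's `escapeRegExp` as the diff does):

```js
// Sketch: a RegExp that is only an anchored, escaped literal carries the same
// information as a plain string, so it can be surfaced as a property value.
import { escapeRegExp } from 'lodash'

function extractStringFromRegexp(re) {
  const unescaped = re.source.replace(/^(\^)|\\|\$$/g, '')
  return re.source === `^${escapeRegExp(unescaped)}$` ? unescaped : undefined
}

console.log(extractStringFromRegexp(/^boo$/)) // 'boo'
console.log(extractStringFromRegexp(/^boo.*$/)) // undefined, '.*' is a real pattern
```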
```
@@ -25,7 +25,7 @@
    ">2%"
  ],
  "engines": {
    "node": ">=4"
    "node": ">=6"
  },
  "dependencies": {},
  "devDependencies": {

@@ -43,6 +43,7 @@
    "prebuild": "yarn run clean",
    "predev": "yarn run prebuild",
    "prepare": "yarn run build",
    "prepublishOnly": "yarn run build"
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -32,7 +32,7 @@
    "exec-promise": "^0.7.0",
    "getopts": "^2.2.3",
    "struct-fu": "^1.2.0",
    "vhd-lib": "^0.6.0"
    "vhd-lib": "^0.6.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -44,13 +44,14 @@
    "index-modules": "^0.3.0",
    "promise-toolbox": "^0.12.1",
    "rimraf": "^2.6.1",
    "tmp": "^0.0.33"
    "tmp": "^0.1.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
    "predev": "yarn run prebuild",
    "prepare": "yarn run build"
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -1,6 +1,6 @@
{
  "name": "vhd-lib",
  "version": "0.6.0",
  "version": "0.6.1",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],

@@ -22,7 +22,7 @@
  },
  "dependencies": {
    "async-iterator-to-stream": "^1.0.2",
    "core-js": "3.0.0",
    "core-js": "^3.0.0",
    "from2": "^2.3.0",
    "fs-extra": "^7.0.0",
    "limit-concurrency-decorator": "^0.4.0",

@@ -44,7 +44,7 @@
    "index-modules": "^0.3.0",
    "readable-stream": "^3.0.6",
    "rimraf": "^2.6.2",
    "tmp": "^0.0.33"
    "tmp": "^0.1.0"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",

@@ -52,6 +52,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
    "prepare": "yarn run build",
    "postversion": "npm publish"
  }
}
```
```
@@ -19,9 +19,7 @@ export default bat => {
      j += 4

      if (j === n) {
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
        return
      }
    }
    lastSector = firstSector
```
```
@@ -23,71 +23,110 @@ afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
})

async function convertFromRawToVhd(rawName, vhdName) {
  await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}
const RAW = 'raw'
const VHD = 'vpc'
const convert = (inputFormat, inputFile, outputFormat, outputFile) =>
  execa('qemu-img', [
    'convert',
    '-f',
    inputFormat,
    '-O',
    outputFormat,
    inputFile,
    outputFile,
  ])

const createRandomStream = asyncIteratorToStream(function*(size) {
  let requested = Math.min(size, yield)
  while (size > 0) {
    const buf = Buffer.allocUnsafe(requested)
    for (let i = 0; i < requested; ++i) {
      buf[i] = Math.floor(Math.random() * 256)
    }
    requested = Math.min((size -= requested), yield buf)
  }
})

async function createRandomFile(name, size) {
  const createRandomStream = asyncIteratorToStream(function*(size) {
    while (size-- > 0) {
      yield Buffer.from([Math.floor(Math.random() * 256)])
    }
  })
  const input = await createRandomStream(size)
  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}

test('createVhdStreamWithLength can extract length', async () => {
  const initialSize = 4 * 1024
  const rawFileName = `${tempDir}/randomfile`
  const vhdName = `${tempDir}/randomfile.vhd`
  const outputVhdName = `${tempDir}/output.vhd`
  await createRandomFile(rawFileName, initialSize)
  await convertFromRawToVhd(rawFileName, vhdName)
  const vhdSize = fs.statSync(vhdName).size
  const result = await createVhdStreamWithLength(
    await createReadStream(vhdName)
  )
  expect(result.length).toEqual(vhdSize)
  const outputFileStream = await createWriteStream(outputVhdName)
  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
  const outputSize = fs.statSync(outputVhdName).size
  expect(outputSize).toEqual(vhdSize)
})
const forOwn = (object, cb) =>
  Object.keys(object).forEach(key => cb(object[key], key, object))

test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
  const initialSize = 4 * 1024
  const rawFileName = `${tempDir}/randomfile`
  const vhdName = `${tempDir}/randomfile.vhd`
  const outputVhdName = `${tempDir}/output.vhd`
  await createRandomFile(rawFileName, initialSize)
  await convertFromRawToVhd(rawFileName, vhdName)
  const vhdSize = fs.statSync(vhdName).size
  // read file footer
  const footer = await getStream.buffer(
    createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
describe('createVhdStreamWithLength', () => {
  forOwn(
    {
      // qemu-img requires this length or it fill with null bytes which breaks
      // the test
      'can extract length': 34816,

      'can handle empty file': 0,
    },
    (size, title) =>
      it(title, async () => {
        const inputRaw = `${tempDir}/input.raw`
        await createRandomFile(inputRaw, size)

        const inputVhd = `${tempDir}/input.vhd`
        await convert(RAW, inputRaw, VHD, inputVhd)

        const result = await createVhdStreamWithLength(
          await createReadStream(inputVhd)
        )
        const { length } = result

        const outputVhd = `${tempDir}/output.vhd`
        await pFromCallback(
          pipeline.bind(undefined, result, await createWriteStream(outputVhd))
        )

        // ensure the guessed length correspond to the stream length
        const { size: outputSize } = await fs.stat(outputVhd)
        expect(length).toEqual(outputSize)

        // ensure the generated VHD is correct and contains the same data
        const outputRaw = `${tempDir}/output.raw`
        await convert(VHD, outputVhd, RAW, outputRaw)
        await execa('cmp', [inputRaw, outputRaw])
      })
  )

  // we'll override the footer
  const endOfFile = await createWriteStream(vhdName, {
    flags: 'r+',
    start: vhdSize - FOOTER_SIZE,
  it('can skip blank after the last block and before the footer', async () => {
    const initialSize = 4 * 1024
    const rawFileName = `${tempDir}/randomfile`
    const vhdName = `${tempDir}/randomfile.vhd`
    const outputVhdName = `${tempDir}/output.vhd`
    await createRandomFile(rawFileName, initialSize)
    await convert(RAW, rawFileName, VHD, vhdName)
    const { size: vhdSize } = await fs.stat(vhdName)
    // read file footer
    const footer = await getStream.buffer(
      createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
    )

    // we'll override the footer
    const endOfFile = await createWriteStream(vhdName, {
      flags: 'r+',
      start: vhdSize - FOOTER_SIZE,
    })
    // write a blank over the previous footer
    await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
    // write the footer after the new blank
    await pFromCallback(cb => endOfFile.end(footer, cb))
    const { size: longerSize } = await fs.stat(vhdName)
    // check input file has been lengthened
    expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
    const result = await createVhdStreamWithLength(
      await createReadStream(vhdName)
    )
    expect(result.length).toEqual(vhdSize)
    const outputFileStream = await createWriteStream(outputVhdName)
    await pFromCallback(cb => pipeline(result, outputFileStream, cb))
    const { size: outputSize } = await fs.stat(outputVhdName)
    // check out file has been shortened again
    expect(outputSize).toEqual(vhdSize)
    await execa('qemu-img', ['compare', outputVhdName, vhdName])
  })
  // write a blank over the previous footer
  await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
  // write the footer after the new blank
  await pFromCallback(cb => endOfFile.end(footer, cb))
  const longerSize = fs.statSync(vhdName).size
  // check input file has been lengthened
  expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
  const result = await createVhdStreamWithLength(
    await createReadStream(vhdName)
  )
  expect(result.length).toEqual(vhdSize)
  const outputFileStream = await createWriteStream(outputVhdName)
  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
  const outputSize = fs.statSync(outputVhdName).size
  // check out file has been shortened again
  expect(outputSize).toEqual(vhdSize)
  await execa('qemu-img', ['compare', outputVhdName, vhdName])
})
```
```
@@ -63,10 +63,14 @@ export default async function createVhdStreamWithLength(stream) {
    stream.unshift(buf)
  }

  const firstAndLastBlocks = getFirstAndLastBlocks(table)
  const footerOffset =
    getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
    Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
    header.blockSize
    firstAndLastBlocks !== undefined
      ? firstAndLastBlocks.lastSector * SECTOR_SIZE +
        Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) *
          SECTOR_SIZE +
        header.blockSize
      : Math.ceil(streamPosition / SECTOR_SIZE) * SECTOR_SIZE

  // ignore any data after footerOffset and push footerBuffer
  //
```
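To make the offset arithmetic concrete, here is an illustrative calculation (not from the diff) using the usual VHD constants, 512-byte sectors and 2 MiB blocks, with an arbitrary example sector for the last allocated block. When no block is allocated at all, the code above instead falls back to rounding the current stream position up to a sector boundary.

```js
// Illustrative numbers only; lastSector is a made-up example.
const SECTOR_SIZE = 512
const blockSize = 2 * 1024 * 1024 // 2 MiB of data per block
const lastSector = 4096 // sector where the last allocated block starts

// bytes occupied by the block's sector bitmap, rounded up to whole sectors
const bitmapSize = Math.ceil(blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE

const footerOffset = lastSector * SECTOR_SIZE + bitmapSize + blockSize
console.log(bitmapSize) // 512: the 512-byte bitmap of a 2 MiB block, sector aligned
console.log(footerOffset) // 2097152 + 512 + 2097152 = 4194816
```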
```
@@ -1,9 +1,7 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'

import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import constantStream from './_constant-stream'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'

@@ -232,64 +230,45 @@ export default class Vhd {
  // Write functions.
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  // Write a buffer at a given position in a vhd file.
  async _write(data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    const stream = await this._handler.createOutputStream(this._path, {
      flags: 'r+',
      start: offset,
    })
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
    assert(Buffer.isBuffer(data))
    debug(`_write offset=${offset} size=${data.length}`)
    return this._handler.write(this._path, data, offset)
  }

  async _freeFirstBlockSpace(spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = getFirstAndLastBlocks(
        this.blockTable
    const firstAndLastBlocks = getFirstAndLastBlocks(this.blockTable)
    if (firstAndLastBlocks === undefined) {
      return
    }

    const { first, firstSector, lastSector } = firstAndLastBlocks
    const tableOffset = this.header.tableOffset
    const { batSize } = this
    const newMinSector = Math.ceil(
      (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
    )
    if (
      tableOffset + batSize + spaceNeededBytes >=
      sectorsToBytes(firstSector)
    ) {
      const { fullBlockSize } = this
      const newFirstSector = Math.max(
        lastSector + fullBlockSize / SECTOR_SIZE,
        newMinSector
      )
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(
        (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
      debug(
        `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
      )
      if (
        tableOffset + batSize + spaceNeededBytes >=
        sectorsToBytes(firstSector)
      ) {
        const { fullBlockSize } = this
        const newFirstSector = Math.max(
          lastSector + fullBlockSize / SECTOR_SIZE,
          newMinSector
        )
        debug(
          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
        )
        // copy the first block at the end
        const block = await this._read(
          sectorsToBytes(firstSector),
          fullBlockSize
        )
        await this._write(block, sectorsToBytes(newFirstSector))
        await this._setBatEntry(first, newFirstSector)
        await this.writeFooter(true)
        spaceNeededBytes -= this.fullBlockSize
        if (spaceNeededBytes > 0) {
          return this._freeFirstBlockSpace(spaceNeededBytes)
        }
      }
    } catch (e) {
      if (!e.noBlock) {
        throw e
      // copy the first block at the end
      const block = await this._read(sectorsToBytes(firstSector), fullBlockSize)
      await this._write(block, sectorsToBytes(newFirstSector))
      await this._setBatEntry(first, newFirstSector)
      await this.writeFooter(true)
      spaceNeededBytes -= this.fullBlockSize
      if (spaceNeededBytes > 0) {
        return this._freeFirstBlockSpace(spaceNeededBytes)
      }
    }
  }
```
|
||||
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
|
||||
)
|
||||
await this._write(
|
||||
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
|
||||
Buffer.alloc(maxTableEntries - prevMaxTableEntries, BUF_BLOCK_UNUSED),
|
||||
header.tableOffset + prevBat.length
|
||||
)
|
||||
await this.writeHeader()
|
||||
@@ -337,10 +316,7 @@ export default class Vhd {
|
||||
|
||||
await Promise.all([
|
||||
// Write an empty block and addr in vhd file.
|
||||
this._write(
|
||||
constantStream([0], this.fullBlockSize),
|
||||
sectorsToBytes(blockAddr)
|
||||
),
|
||||
this._write(Buffer.alloc(this.fullBlockSize), sectorsToBytes(blockAddr)),
|
||||
|
||||
this._setBatEntry(blockId, blockAddr),
|
||||
])
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"pw": "^0.0.4",
|
||||
"xen-api": "^0.24.6"
|
||||
"xen-api": "^0.25.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.1.5",
|
||||
@@ -56,6 +56,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xen-api",
|
||||
"version": "0.24.6",
|
||||
"version": "0.25.1",
|
||||
"license": "ISC",
|
||||
"description": "Connector to the Xen API",
|
||||
"keywords": [
|
||||
@@ -33,12 +33,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"bind-property-descriptor": "^1.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"debug": "^4.0.1",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"http-request-plus": "^0.8.0",
|
||||
"iterable-backoff": "^0.1.0",
|
||||
"jest-diff": "^24.0.0",
|
||||
"json-rpc-protocol": "^0.13.1",
|
||||
"kindof": "^2.0.0",
|
||||
@@ -69,6 +69,7 @@
|
||||
"plot": "gnuplot -p memory-test.gnu",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import minimist from 'minimist'
|
||||
import pw from 'pw'
|
||||
import { asCallback, fromCallback } from 'promise-toolbox'
|
||||
import { filter, find, isArray } from 'lodash'
|
||||
import { getBoundPropertyDescriptor } from 'bind-property-descriptor'
|
||||
import { start as createRepl } from 'repl'
|
||||
|
||||
import { createClient } from './'
|
||||
@@ -25,6 +26,20 @@ function askPassword(prompt = 'Password: ') {
|
||||
})
|
||||
}
|
||||
|
||||
const { getPrototypeOf, ownKeys } = Reflect
|
||||
function getAllBoundDescriptors(object) {
|
||||
const descriptors = { __proto__: null }
|
||||
let current = object
|
||||
do {
|
||||
ownKeys(current).forEach(key => {
|
||||
if (!(key in descriptors)) {
|
||||
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
|
||||
}
|
||||
})
|
||||
} while ((current = getPrototypeOf(current)) !== null)
|
||||
return descriptors
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const usage = 'Usage: xen-api <url> [<user> [<password>]]'
|
||||
@@ -78,11 +93,17 @@ const main = async args => {
|
||||
const repl = createRepl({
|
||||
prompt: `${xapi._humanId}> `,
|
||||
})
|
||||
repl.context.xapi = xapi
|
||||
|
||||
repl.context.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
repl.context.find = predicate => find(xapi.objects.all, predicate)
|
||||
repl.context.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
{
|
||||
const ctx = repl.context
|
||||
ctx.xapi = xapi
|
||||
|
||||
ctx.diff = (a, b) => console.log('%s', diff(a, b))
|
||||
ctx.find = predicate => find(xapi.objects.all, predicate)
|
||||
ctx.findAll = predicate => filter(xapi.objects.all, predicate)
|
||||
|
||||
Object.defineProperties(ctx, getAllBoundDescriptors(xapi))
|
||||
}
|
||||
|
||||
// Make the REPL waits for promise completion.
|
||||
repl.eval = (evaluate => (cmd, context, filename, cb) => {
|
||||
|
||||
@@ -4,26 +4,15 @@ import kindOf from 'kindof'
|
||||
import ms from 'ms'
|
||||
import httpRequest from 'http-request-plus'
|
||||
import { EventEmitter } from 'events'
|
||||
import { fibonacci } from 'iterable-backoff'
|
||||
import {
|
||||
forEach,
|
||||
forOwn,
|
||||
isArray,
|
||||
map,
|
||||
noop,
|
||||
omit,
|
||||
reduce,
|
||||
startsWith,
|
||||
} from 'lodash'
|
||||
import { isArray, map, noop, omit } from 'lodash'
|
||||
import {
|
||||
cancelable,
|
||||
defer,
|
||||
fromEvents,
|
||||
ignoreErrors,
|
||||
pCatch,
|
||||
pDelay,
|
||||
pRetry,
|
||||
pTimeout,
|
||||
TimeoutError,
|
||||
} from 'promise-toolbox'
|
||||
|
||||
import autoTransport from './transports/auto'
|
||||
@@ -43,54 +32,9 @@ import XapiError from './_XapiError'
|
||||
// in seconds!
|
||||
const EVENT_TIMEOUT = 60
|
||||
|
||||
// http://www.gnu.org/software/libc/manual/html_node/Error-Codes.html
|
||||
const NETWORK_ERRORS = {
|
||||
// Connection has been closed outside of our control.
|
||||
ECONNRESET: true,
|
||||
|
||||
// Connection has been aborted locally.
|
||||
ECONNABORTED: true,
|
||||
|
||||
// Host is up but refuses connection (typically: no such service).
|
||||
ECONNREFUSED: true,
|
||||
|
||||
// TODO: ??
|
||||
EINVAL: true,
|
||||
|
||||
// Host is not reachable (does not respond).
|
||||
EHOSTUNREACH: true,
|
||||
|
||||
// network is unreachable
|
||||
ENETUNREACH: true,
|
||||
|
||||
// Connection configured timed out has been reach.
|
||||
ETIMEDOUT: true,
|
||||
}
|
||||
|
||||
const isNetworkError = ({ code }) => NETWORK_ERRORS[code]
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const XAPI_NETWORK_ERRORS = {
|
||||
HOST_STILL_BOOTING: true,
|
||||
HOST_HAS_NO_MANAGEMENT_IP: true,
|
||||
}
|
||||
|
||||
const isXapiNetworkError = ({ code }) => XAPI_NETWORK_ERRORS[code]
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const areEventsLost = ({ code }) => code === 'EVENTS_LOST'
|
||||
|
||||
const isHostSlave = ({ code }) => code === 'HOST_IS_SLAVE'
|
||||
|
||||
const isMethodUnknown = ({ code }) => code === 'MESSAGE_METHOD_UNKNOWN'
|
||||
|
||||
const isSessionInvalid = ({ code }) => code === 'SESSION_INVALID'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const { defineProperties, freeze, keys: getKeys } = Object
|
||||
const { defineProperties, defineProperty, freeze, keys: getKeys } = Object
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
@@ -98,10 +42,6 @@ export const NULL_REF = 'OpaqueRef:NULL'
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const getKey = o => o.$id
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const RESERVED_FIELDS = {
|
||||
id: true,
|
||||
pool: true,
|
||||
@@ -120,17 +60,15 @@ const CONNECTED = 'connected'
|
||||
const CONNECTING = 'connecting'
|
||||
const DISCONNECTED = 'disconnected'
|
||||
|
||||
// timeout of XenAPI HTTP connections
|
||||
const HTTP_TIMEOUT = 24 * 3600 * 1e3
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export class Xapi extends EventEmitter {
|
||||
constructor(opts) {
|
||||
super()
|
||||
|
||||
this._callTimeout = makeCallSetting(opts.callTimeout, 0)
|
||||
this._callTimeout = makeCallSetting(opts.callTimeout, 60 * 60 * 1e3) // 1 hour but will be reduced in the future
|
||||
this._httpInactivityTimeout = opts.httpInactivityTimeout ?? 5 * 60 * 1e3 // 5 mins
|
||||
this._eventPollDelay = opts.eventPollDelay ?? 60 * 1e3 // 1 min
|
||||
this._pool = null
|
||||
this._readOnly = Boolean(opts.readOnly)
|
||||
this._RecordsByType = { __proto__: null }
|
||||
@@ -154,21 +92,22 @@ export class Xapi extends EventEmitter {
|
||||
this._allowUnauthorized = opts.allowUnauthorized
|
||||
this._setUrl(url)
|
||||
|
||||
this._connecting = undefined
|
||||
this._connected = new Promise(resolve => {
|
||||
this._resolveConnected = resolve
|
||||
})
|
||||
this._disconnected = Promise.resolve()
|
||||
this._sessionId = undefined
|
||||
this._status = DISCONNECTED
|
||||
;(this._objects = new Collection()).getKey = getKey
|
||||
this._debounce = opts.debounce == null ? 200 : opts.debounce
|
||||
|
||||
this._debounce = opts.debounce ?? 200
|
||||
this._objects = new Collection()
|
||||
this._objectsByRef = { __proto__: null }
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
this._eventWatchers = { __proto__: null }
|
||||
this._taskWatchers = { __proto__: null }
|
||||
this._watchedTypes = undefined
|
||||
this._watching = false
|
||||
|
||||
this.on(DISCONNECTED, this._clearObjects)
|
||||
this._clearObjects()
|
||||
|
||||
const { watchEvents } = opts
|
||||
if (watchEvents !== false) {
|
||||
if (isArray(watchEvents)) {
|
||||
@@ -203,28 +142,17 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
get sessionId() {
|
||||
const id = this._sessionId
|
||||
|
||||
if (id === undefined || id === CONNECTING) {
|
||||
throw new Error('sessionId is only available when connected')
|
||||
}
|
||||
|
||||
return id
|
||||
assert(this._status === CONNECTED)
|
||||
return this._sessionId
|
||||
}
|
||||
|
||||
get status() {
|
||||
const id = this._sessionId
|
||||
|
||||
return id === undefined
|
||||
? DISCONNECTED
|
||||
: id === CONNECTING
|
||||
? CONNECTING
|
||||
: CONNECTED
|
||||
return this._status
|
||||
}
|
||||
|
||||
connect = coalesceCalls(this.connect)
|
||||
async connect() {
|
||||
const { status } = this
|
||||
const status = this._status
|
||||
|
||||
if (status === CONNECTED) {
|
||||
return
|
||||
@@ -232,21 +160,18 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
assert(status === DISCONNECTED)
|
||||
|
||||
const auth = this._auth
|
||||
|
||||
this._sessionId = CONNECTING
|
||||
this._status = CONNECTING
|
||||
this._disconnected = new Promise(resolve => {
|
||||
this._resolveDisconnected = resolve
|
||||
})
|
||||
|
||||
try {
|
||||
const [methods, sessionId] = await Promise.all([
|
||||
this._transportCall('system.listMethods', []),
|
||||
this._transportCall('session.login_with_password', [
|
||||
auth.user,
|
||||
auth.password,
|
||||
]),
|
||||
])
|
||||
await this._sessionOpen()
|
||||
|
||||
// Uses introspection to list available types.
|
||||
const types = (this._types = methods
|
||||
const types = (this._types = (await this._interruptOnDisconnect(
|
||||
this._call('system.listMethods')
|
||||
))
|
||||
.filter(isGetAllRecordsMethod)
|
||||
.map(method => method.slice(0, method.indexOf('.'))))
|
||||
this._lcToTypes = { __proto__: null }
|
||||
@@ -257,13 +182,10 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
})
|
||||
|
||||
this._sessionId = sessionId
|
||||
this._pool = (await this.getAllRecords('pool'))[0]
|
||||
|
||||
debug('%s: connected', this._humanId)
|
||||
this._disconnected = new Promise(resolve => {
|
||||
this._resolveDisconnected = resolve
|
||||
})
|
||||
this._status = CONNECTED
|
||||
this._resolveConnected()
|
||||
this._resolveConnected = undefined
|
||||
this.emit(CONNECTED)
|
||||
@@ -275,7 +197,7 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
async disconnect() {
|
||||
const { status } = this
|
||||
const status = this._status
|
||||
|
||||
if (status === DISCONNECTED) {
|
||||
return
|
||||
@@ -285,9 +207,6 @@ export class Xapi extends EventEmitter {
|
||||
this._connected = new Promise(resolve => {
|
||||
this._resolveConnected = resolve
|
||||
})
|
||||
|
||||
this._resolveDisconnected()
|
||||
this._resolveDisconnected = undefined
|
||||
} else {
|
||||
assert(status === CONNECTING)
|
||||
}
|
||||
@@ -295,11 +214,14 @@ export class Xapi extends EventEmitter {
|
||||
const sessionId = this._sessionId
|
||||
if (sessionId !== undefined) {
|
||||
this._sessionId = undefined
|
||||
ignoreErrors.call(this._transportCall('session.logout', [sessionId]))
|
||||
ignoreErrors.call(this._call('session.logout', [sessionId]))
|
||||
}
|
||||
|
||||
debug('%s: disconnected', this._humanId)
|
||||
|
||||
this._status = DISCONNECTED
|
||||
this._resolveDisconnected()
|
||||
this._resolveDisconnected = undefined
|
||||
this.emit(DISCONNECTED)
|
||||
}
|
||||
|
||||
@@ -569,6 +491,10 @@ export class Xapi extends EventEmitter {
|
||||
return this._objects
|
||||
}
|
||||
|
||||
get objectsFetched() {
|
||||
return this._objectsFetched
|
||||
}
|
||||
|
||||
// ensure we have received all events up to this call
|
||||
//
|
||||
// optionally returns the up to date object for the given ref
|
||||
@@ -652,26 +578,16 @@ export class Xapi extends EventEmitter {
|
||||
// Object ids are already UUIDs if they have one.
|
||||
const object = this._objects.all[uuid]
|
||||
|
||||
if (object) return object
|
||||
if (object !== undefined) return object
|
||||
|
||||
if (arguments.length > 1) return defaultValue
|
||||
|
||||
throw new Error('no object with UUID: ' + uuid)
|
||||
}
|
||||
|
||||
// manually start watching events if set to `false` in the constructor
|
||||
watchEvents() {
|
||||
this._eventWatchers = { __proto__: null }
|
||||
|
||||
this._taskWatchers = { __proto__: null }
|
||||
|
||||
if (this.status === CONNECTED) {
|
||||
this._watchEventsWrapper()
|
||||
}
|
||||
|
||||
this.on('connected', this._watchEventsWrapper)
|
||||
this.on('disconnected', () => {
|
||||
this._objects.clear()
|
||||
})
|
||||
ignoreErrors.call(this._watchEvents())
|
||||
}
|
||||
|
||||
watchTask(ref) {
|
||||
@@ -703,13 +619,42 @@ export class Xapi extends EventEmitter {
|
||||
// Private
|
||||
// ===========================================================================
|
||||
|
||||
_clearObjects() {
|
||||
;(this._objectsByRef = { __proto__: null })[NULL_REF] = undefined
|
||||
this._nTasks = 0
|
||||
this._objects.clear()
|
||||
this.objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
async _call(method, args, timeout = this._callTimeout(method, args)) {
|
||||
const startTime = Date.now()
|
||||
try {
|
||||
const result = await pTimeout.call(this._transport(method, args), timeout)
|
||||
debug(
|
||||
'%s: %s(...) [%s] ==> %s',
|
||||
this._humanId,
|
||||
method,
|
||||
ms(Date.now() - startTime),
|
||||
kindOf(result)
|
||||
)
|
||||
return result
|
||||
} catch (e) {
|
||||
const error = e instanceof Error ? e : XapiError.wrap(e)
|
||||
|
||||
// do not log the session ID
|
||||
//
|
||||
// TODO: should log at the session level to avoid logging sensitive
|
||||
// values?
|
||||
const params = args[0] === this._sessionId ? args.slice(1) : args
|
||||
|
||||
error.call = {
|
||||
method,
|
||||
params: replaceSensitiveValues(params, '* obfuscated *'),
|
||||
}
|
||||
|
||||
debug(
|
||||
'%s: %s(...) [%s] =!> %s',
|
||||
this._humanId,
|
||||
method,
|
||||
ms(Date.now() - startTime),
|
||||
error
|
||||
)
|
||||
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
// return a promise which resolves to a task ref or undefined
|
||||
@@ -726,41 +671,79 @@ export class Xapi extends EventEmitter {
|
||||
return Promise.resolve(task)
|
||||
}
|
||||
|
||||
// Medium level call: handle session errors.
|
||||
_sessionCall(method, args, timeout = this._callTimeout(method, args)) {
|
||||
try {
|
||||
if (startsWith(method, 'session.')) {
|
||||
throw new Error('session.*() methods are disabled from this interface')
|
||||
}
|
||||
_interruptOnDisconnect(promise) {
|
||||
let listener
|
||||
const pWrapper = new Promise((resolve, reject) => {
|
||||
promise.then(resolve, reject)
|
||||
this.on(
|
||||
DISCONNECTED,
|
||||
(listener = () => {
|
||||
reject(new Error('disconnected'))
|
||||
})
|
||||
)
|
||||
})
|
||||
const clean = () => {
|
||||
this.removeListener(DISCONNECTED, listener)
|
||||
}
|
||||
pWrapper.then(clean, clean)
|
||||
return pWrapper
|
||||
}
|
||||
|
||||
const newArgs = [this.sessionId]
|
||||
_sessionCallRetryOptions = {
|
||||
tries: 2,
|
||||
when: error =>
|
||||
this._status !== DISCONNECTED && error?.code === 'SESSION_INVALID',
|
||||
onRetry: () => this._sessionOpen(),
|
||||
}
|
||||
_sessionCall(method, args, timeout) {
|
||||
if (method.startsWith('session.')) {
|
||||
return Promise.reject(
|
||||
new Error('session.*() methods are disabled from this interface')
|
||||
)
|
||||
}
|
||||
|
||||
return pRetry(() => {
|
||||
const sessionId = this._sessionId
|
||||
assert.notStrictEqual(sessionId, undefined)
|
||||
|
||||
const newArgs = [sessionId]
|
||||
if (args !== undefined) {
|
||||
newArgs.push.apply(newArgs, args)
|
||||
}
|
||||
|
||||
return pTimeout.call(
|
||||
pCatch.call(
|
||||
this._transportCall(method, newArgs),
|
||||
isSessionInvalid,
|
||||
() => {
|
||||
// XAPI is sometimes reinitialized and sessions are lost.
|
||||
// Try to login again.
|
||||
debug('%s: the session has been reinitialized', this._humanId)
|
||||
return this._call(method, newArgs, timeout)
|
||||
}, this._sessionCallRetryOptions)
|
||||
}
|
||||
|
||||
this._sessionId = undefined
|
||||
return this.connect().then(() => this._sessionCall(method, args))
|
||||
}
|
||||
// FIXME: (probably rare) race condition leading to unnecessary login when:
// 1. two calls using an invalid session start
// 2. one fails with SESSION_INVALID and renews the session by calling
//    `_sessionOpen`
// 3. the session is renewed
// 4. the second call fails with SESSION_INVALID which leads to a new
//    unnecessary renewal
_sessionOpen = coalesceCalls(this._sessionOpen)
async _sessionOpen() {
const { user, password } = this._auth
const params = [user, password]
this._sessionId = await pRetry(
() =>
this._interruptOnDisconnect(
this._call('session.login_with_password', params)
),
timeout
)
} catch (error) {
return Promise.reject(error)
}
{
tries: 2,
when: { code: 'HOST_IS_SLAVE' },
onRetry: error => {
this._setUrl({ ...this._url, hostname: error.params[0] })
},
}
)
}
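
Both `connect` and `_sessionOpen` are wrapped with `coalesceCalls` from promise-toolbox, so concurrent callers share a single in-flight attempt instead of opening parallel sessions. A minimal sketch of the idea (not the library's actual implementation) could look like this:

// Illustrative sketch only: concurrent callers share the pending promise.
function coalesceCallsSketch(fn) {
  let pending
  return function() {
    if (pending === undefined) {
      pending = Promise.resolve(fn.apply(this, arguments)).finally(() => {
        // allow a fresh call once the current one settles
        pending = undefined
      })
    }
    return pending
  }
}
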
_setUrl(url) {
|
||||
this._humanId = `${this._auth.user}@${url.hostname}`
|
||||
this._call = autoTransport({
|
||||
this._transport = autoTransport({
|
||||
allowUnauthorized: this._allowUnauthorized,
|
||||
url,
|
||||
})
|
||||
@@ -779,11 +762,15 @@ export class Xapi extends EventEmitter {
|
||||
// An object's UUID can change during its life.
|
||||
const prev = objectsByRef[ref]
|
||||
let prevUuid
|
||||
if (prev && (prevUuid = prev.uuid) && prevUuid !== object.uuid) {
|
||||
if (
|
||||
prev !== undefined &&
|
||||
(prevUuid = prev.uuid) !== undefined &&
|
||||
prevUuid !== object.uuid
|
||||
) {
|
||||
objects.remove(prevUuid)
|
||||
}
|
||||
|
||||
this._objects.set(object)
|
||||
this._objects.set(object.$id, object)
|
||||
objectsByRef[ref] = object
|
||||
|
||||
if (type === 'pool') {
|
||||
@@ -815,6 +802,7 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
|
||||
_processEvents(events) {
|
||||
const flush = this._objects.bufferEvents()
|
||||
events.forEach(event => {
|
||||
let type = event.class
|
||||
const lcToTypes = this._lcToTypes
|
||||
@@ -828,6 +816,54 @@ export class Xapi extends EventEmitter {
|
||||
this._addRecordToCache(type, ref, event.snapshot)
|
||||
}
|
||||
})
|
||||
flush()
|
||||
}
|
||||
|
||||
async _refreshCachedRecords(types) {
|
||||
const toRemoveByType = { __proto__: null }
|
||||
types.forEach(type => {
|
||||
toRemoveByType[type] = new Set()
|
||||
})
|
||||
const byRefs = this._objectsByRef
|
||||
getKeys(byRefs).forEach(ref => {
|
||||
const { $type } = byRefs[ref]
|
||||
const toRemove = toRemoveByType[$type]
|
||||
if (toRemove !== undefined) {
|
||||
toRemove.add(ref)
|
||||
}
|
||||
})
|
||||
|
||||
const flush = this._objects.bufferEvents()
|
||||
await Promise.all(
|
||||
types.map(async type => {
|
||||
try {
|
||||
const toRemove = toRemoveByType[type]
|
||||
const records = await this._sessionCall(`${type}.get_all_records`)
|
||||
const refs = getKeys(records)
|
||||
refs.forEach(ref => {
|
||||
toRemove.delete(ref)
|
||||
|
||||
// we can bypass _processEvents here because they are all *add*
// events and all objects are of the same type
|
||||
this._addRecordToCache(type, ref, records[ref])
|
||||
})
|
||||
toRemove.forEach(ref => {
|
||||
this._removeRecordFromCache(type, ref)
|
||||
})
|
||||
|
||||
if (type === 'task') {
|
||||
this._nTasks = refs.length
|
||||
}
|
||||
} catch (error) {
|
||||
// there is nothing ideal to do here, do not interrupt event
|
||||
// handling
|
||||
if (error?.code !== 'MESSAGE_REMOVED') {
|
||||
console.warn('_refreshCachedRecords', type, error)
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
flush()
|
||||
}
|
||||
|
||||
_removeRecordFromCache(type, ref) {
|
||||
@@ -853,120 +889,80 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
}
|
||||
|
||||
// - prevent multiple watches
|
||||
// - swallow errors
|
||||
async _watchEventsWrapper() {
|
||||
if (!this._watching) {
|
||||
this._watching = true
|
||||
try {
|
||||
await this._watchEvents()
|
||||
} catch (error) {
|
||||
console.error('_watchEventsWrapper', error)
|
||||
}
|
||||
this._watching = false
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: cancelation
|
||||
_watchEvents = coalesceCalls(this._watchEvents)
|
||||
async _watchEvents() {
|
||||
this._clearObjects()
|
||||
|
||||
// compute the initial token for the event loop
|
||||
//
|
||||
// we need to do this before the initial fetch to avoid losing events
|
||||
let fromToken
|
||||
try {
|
||||
fromToken = await this._sessionCall('event.inject', [
|
||||
'pool',
|
||||
this._pool.$ref,
|
||||
])
|
||||
} catch (error) {
|
||||
if (isMethodUnknown(error)) {
|
||||
return this._watchEventsLegacy()
|
||||
}
|
||||
}
|
||||
|
||||
const types = this._watchedTypes || this._types
|
||||
|
||||
// initial fetch
|
||||
const flush = this.objects.bufferEvents()
|
||||
try {
|
||||
await Promise.all(
|
||||
types.map(async type => {
|
||||
try {
|
||||
// FIXME: use _transportCall to avoid auto-reconnection
|
||||
forOwn(
|
||||
await this._sessionCall(`${type}.get_all_records`),
|
||||
(record, ref) => {
|
||||
// we can bypass _processEvents here because they are all *add*
|
||||
// event and all objects are of the same type
|
||||
this._addRecordToCache(type, ref, record)
|
||||
}
|
||||
)
|
||||
} catch (error) {
|
||||
// there is nothing ideal to do here, do not interrupt event
|
||||
// handling
|
||||
if (error != null && error.code !== 'MESSAGE_REMOVED') {
|
||||
console.warn('_watchEvents', 'initial fetch', type, error)
|
||||
}
|
||||
}
|
||||
// eslint-disable-next-line no-labels
|
||||
mainLoop: while (true) {
|
||||
if (this._resolveObjectsFetched === undefined) {
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
)
|
||||
} finally {
|
||||
flush()
|
||||
}
|
||||
this._resolveObjectsFetched()
|
||||
|
||||
// event loop
|
||||
const debounce = this._debounce
|
||||
while (true) {
|
||||
if (debounce != null) {
|
||||
await pDelay(debounce)
|
||||
}
|
||||
|
||||
let result
|
||||
await this._connected
|
||||
|
||||
// compute the initial token for the event loop
|
||||
//
|
||||
// we need to do this before the initial fetch to avoid losing events
|
||||
let fromToken
|
||||
try {
|
||||
result = await this._sessionCall(
|
||||
'event.from',
|
||||
[
|
||||
types,
|
||||
fromToken,
|
||||
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
|
||||
],
|
||||
EVENT_TIMEOUT * 1e3 * 1.1
|
||||
)
|
||||
fromToken = await this._sessionCall('event.inject', [
|
||||
'pool',
|
||||
this._pool.$ref,
|
||||
])
|
||||
} catch (error) {
|
||||
if (error instanceof TimeoutError) {
|
||||
if (error?.code === 'MESSAGE_METHOD_UNKNOWN') {
|
||||
return this._watchEventsLegacy()
|
||||
}
|
||||
|
||||
console.warn('_watchEvents', error)
|
||||
await pDelay(this._eventPollDelay)
|
||||
continue
|
||||
}
|
||||
|
||||
const types = this._watchedTypes ?? this._types
|
||||
|
||||
// initial fetch
|
||||
await this._refreshCachedRecords(types)
|
||||
this._resolveObjectsFetched()
|
||||
this._resolveObjectsFetched = undefined
|
||||
|
||||
// event loop
|
||||
const debounce = this._debounce
|
||||
while (true) {
|
||||
await pDelay(debounce)
|
||||
|
||||
await this._connected
|
||||
|
||||
let result
|
||||
try {
|
||||
result = await this._sessionCall(
|
||||
'event.from',
|
||||
[
|
||||
types,
|
||||
fromToken,
|
||||
EVENT_TIMEOUT + 0.1, // must be float for XML-RPC transport
|
||||
],
|
||||
EVENT_TIMEOUT * 1e3 * 1.1
|
||||
)
|
||||
} catch (error) {
|
||||
if (error?.code === 'EVENTS_LOST') {
|
||||
// eslint-disable-next-line no-labels
|
||||
continue mainLoop
|
||||
}
|
||||
|
||||
console.warn('_watchEvents', error)
|
||||
await pDelay(this._eventPollDelay)
|
||||
continue
|
||||
}
|
||||
if (areEventsLost(error)) {
|
||||
return this._watchEvents()
|
||||
|
||||
fromToken = result.token
|
||||
this._processEvents(result.events)
|
||||
|
||||
// detect and fix disappearing tasks (e.g. when toolstack restarts)
|
||||
if (result.valid_ref_counts.task !== this._nTasks) {
|
||||
await this._refreshCachedRecords(['task'])
|
||||
}
|
||||
throw error
|
||||
}
|
||||
|
||||
fromToken = result.token
|
||||
this._processEvents(result.events)
|
||||
|
||||
// detect and fix disappearing tasks (e.g. when toolstack restarts)
|
||||
if (result.valid_ref_counts.task !== this._nTasks) {
|
||||
await ignoreErrors.call(
|
||||
this._sessionCall('task.get_all_records').then(tasks => {
|
||||
const toRemove = new Set()
|
||||
forOwn(this.objects.all, object => {
|
||||
if (object.$type === 'task') {
|
||||
toRemove.add(object.$ref)
|
||||
}
|
||||
})
|
||||
forOwn(tasks, (task, ref) => {
|
||||
toRemove.delete(ref)
|
||||
this._addRecordToCache('task', ref, task)
|
||||
})
|
||||
toRemove.forEach(ref => {
|
||||
this._removeRecordFromCache('task', ref)
|
||||
})
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -975,55 +971,46 @@ export class Xapi extends EventEmitter {
|
||||
// methods.
|
||||
//
|
||||
// It also has to manually get all objects first.
|
||||
_watchEventsLegacy() {
|
||||
const getAllObjects = async () => {
|
||||
const flush = this.objects.bufferEvents()
|
||||
async _watchEventsLegacy() {
|
||||
if (this._resolveObjectsFetched === undefined) {
|
||||
this._objectsFetched = new Promise(resolve => {
|
||||
this._resolveObjectsFetched = resolve
|
||||
})
|
||||
}
|
||||
|
||||
await this._connected
|
||||
|
||||
const types = this._watchedTypes ?? this._types
|
||||
|
||||
// initial fetch
|
||||
await this._refreshCachedRecords(types)
|
||||
this._resolveObjectsFetched()
|
||||
this._resolveObjectsFetched = undefined
|
||||
|
||||
await this._sessionCall('event.register', [types])
|
||||
|
||||
// event loop
|
||||
const debounce = this._debounce
|
||||
while (true) {
|
||||
await pDelay(debounce)
|
||||
|
||||
try {
|
||||
await Promise.all(
|
||||
this._types.map(type =>
|
||||
this._sessionCall(`${type}.get_all_records`).then(
|
||||
objects => {
|
||||
forEach(objects, (object, ref) => {
|
||||
this._addRecordToCache(type, ref, object)
|
||||
})
|
||||
},
|
||||
error => {
|
||||
if (error.code !== 'MESSAGE_REMOVED') {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
)
|
||||
)
|
||||
await this._connected
|
||||
this._processEvents(
|
||||
await this._sessionCall('event.next', undefined, EVENT_TIMEOUT * 1e3)
|
||||
)
|
||||
} finally {
|
||||
flush()
|
||||
} catch (error) {
|
||||
if (error?.code === 'EVENTS_LOST') {
|
||||
await ignoreErrors.call(
|
||||
this._sessionCall('event.unregister', [types])
|
||||
)
|
||||
return this._watchEventsLegacy()
|
||||
}
|
||||
|
||||
console.warn('_watchEventsLegacy', error)
|
||||
await pDelay(this._eventPollDelay)
|
||||
}
|
||||
this._resolveObjectsFetched()
|
||||
}
|
||||
|
||||
const watchEvents = () =>
|
||||
this._sessionCall('event.register', [['*']]).then(loop)
|
||||
|
||||
const loop = () =>
|
||||
this.status === CONNECTED &&
|
||||
this._sessionCall('event.next').then(onSuccess, onFailure)
|
||||
|
||||
const onSuccess = events => {
|
||||
this._processEvents(events)
|
||||
|
||||
const debounce = this._debounce
|
||||
return debounce == null ? loop() : pDelay(debounce).then(loop)
|
||||
}
|
||||
|
||||
const onFailure = error => {
|
||||
if (areEventsLost(error)) {
|
||||
return this._sessionCall('event.unregister', [['*']]).then(watchEvents)
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
|
||||
return getAllObjects().then(watchEvents)
|
||||
}
|
||||
|
||||
_wrapRecord(type, ref, data) {
|
||||
@@ -1036,17 +1023,23 @@ export class Xapi extends EventEmitter {
|
||||
|
||||
const getObjectByRef = ref => this._objectsByRef[ref]
|
||||
|
||||
Record = function(ref, data) {
|
||||
defineProperties(this, {
|
||||
$id: { value: data.uuid ?? ref },
|
||||
$ref: { value: ref },
|
||||
$xapi: { value: xapi },
|
||||
})
|
||||
for (let i = 0; i < nFields; ++i) {
|
||||
const field = fields[i]
|
||||
this[field] = data[field]
|
||||
Record = defineProperty(
|
||||
function(ref, data) {
|
||||
defineProperties(this, {
|
||||
$id: { value: data.uuid ?? ref },
|
||||
$ref: { value: ref },
|
||||
$xapi: { value: xapi },
|
||||
})
|
||||
for (let i = 0; i < nFields; ++i) {
|
||||
const field = fields[i]
|
||||
this[field] = data[field]
|
||||
}
|
||||
},
|
||||
'name',
|
||||
{
|
||||
value: type,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
const getters = { $pool: getPool }
|
||||
const props = { $type: type }
|
||||
@@ -1116,123 +1109,6 @@ export class Xapi extends EventEmitter {
|
||||
}
|
||||
}
|
||||
|
||||
Xapi.prototype._transportCall = reduce(
|
||||
[
|
||||
function(method, args) {
|
||||
return pTimeout
|
||||
.call(this._call(method, args), HTTP_TIMEOUT)
|
||||
.catch(error => {
|
||||
if (!(error instanceof Error)) {
|
||||
error = XapiError.wrap(error)
|
||||
}
|
||||
|
||||
// do not log the session ID
|
||||
//
|
||||
// TODO: should log at the session level to avoid logging sensitive
|
||||
// values?
|
||||
const params = args[0] === this._sessionId ? args.slice(1) : args
|
||||
|
||||
error.call = {
|
||||
method,
|
||||
params: replaceSensitiveValues(params, '* obfuscated *'),
|
||||
}
|
||||
throw error
|
||||
})
|
||||
},
|
||||
call =>
|
||||
function() {
|
||||
let iterator // lazily created
|
||||
const loop = () =>
|
||||
pCatch.call(
|
||||
call.apply(this, arguments),
|
||||
isNetworkError,
|
||||
isXapiNetworkError,
|
||||
error => {
|
||||
if (iterator === undefined) {
|
||||
iterator = fibonacci()
|
||||
.clamp(undefined, 60)
|
||||
.take(10)
|
||||
.toMs()
|
||||
}
|
||||
|
||||
const cursor = iterator.next()
|
||||
if (!cursor.done) {
|
||||
// TODO: ability to cancel the connection
|
||||
// TODO: ability to force immediate reconnection
|
||||
|
||||
const delay = cursor.value
|
||||
debug(
|
||||
'%s: network error %s, next try in %s ms',
|
||||
this._humanId,
|
||||
error.code,
|
||||
delay
|
||||
)
|
||||
return pDelay(delay).then(loop)
|
||||
}
|
||||
|
||||
debug('%s: network error %s, aborting', this._humanId, error.code)
|
||||
|
||||
// mark as disconnected
|
||||
pCatch.call(this.disconnect(), noop)
|
||||
|
||||
throw error
|
||||
}
|
||||
)
|
||||
return loop()
|
||||
},
|
||||
call =>
|
||||
function loop() {
|
||||
return pCatch.call(
|
||||
call.apply(this, arguments),
|
||||
isHostSlave,
|
||||
({ params: [master] }) => {
|
||||
debug(
|
||||
'%s: host is slave, attempting to connect at %s',
|
||||
this._humanId,
|
||||
master
|
||||
)
|
||||
|
||||
const newUrl = {
|
||||
...this._url,
|
||||
hostname: master,
|
||||
}
|
||||
this.emit('redirect', newUrl)
|
||||
this._url = newUrl
|
||||
|
||||
return loop.apply(this, arguments)
|
||||
}
|
||||
)
|
||||
},
|
||||
call =>
function(method) {
const startTime = Date.now()
return call.apply(this, arguments).then(
result => {
debug(
'%s: %s(...) [%s] ==> %s',
this._humanId,
method,
ms(Date.now() - startTime),
kindOf(result)
)
return result
},
error => {
debug(
'%s: %s(...) [%s] =!> %s',
this._humanId,
method,
ms(Date.now() - startTime),
error
)
throw error
}
)
},
],
(call, decorator) => decorator(call)
)
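
The first decorator in this chain retries transport calls on network and XAPI network errors with a Fibonacci backoff clamped to 60 seconds and at most 10 delays, then marks the client as disconnected. A rough, self-contained sketch of that retry pattern, independent of the backoff helper actually used here, might be:

// Illustrative sketch only: Fibonacci delays in seconds, clamped to 60 s,
// at most 10 retries before letting the error propagate.
async function retryWithFibonacciBackoff(call, isRetryable) {
  let a = 1
  let b = 1
  for (let attempt = 0; attempt < 10; ++attempt) {
    try {
      return await call()
    } catch (error) {
      if (!isRetryable(error)) {
        throw error
      }
      await new Promise(resolve => setTimeout(resolve, Math.min(a, 60) * 1e3))
      ;[a, b] = [b, a + b]
    }
  }
  return call() // final attempt
}
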
// ===================================================================
|
||||
|
||||
// The default value is a factory function.
|
||||
|
||||
@@ -4,31 +4,33 @@ import { pDelay } from 'promise-toolbox'
|
||||
|
||||
import { createClient } from './'
|
||||
|
||||
const xapi = (() => {
|
||||
const [, , url, user, password] = process.argv
|
||||
|
||||
return createClient({
|
||||
auth: { user, password },
|
||||
async function main([url]) {
|
||||
const xapi = createClient({
|
||||
allowUnauthorized: true,
|
||||
url,
|
||||
watchEvents: false,
|
||||
})
|
||||
})()
|
||||
await xapi.connect()
|
||||
|
||||
xapi
|
||||
.connect()
|
||||
|
||||
// Get the pool record's ref.
|
||||
.then(() => xapi.call('pool.get_all'))
|
||||
|
||||
// Injects lots of events.
|
||||
.then(([poolRef]) => {
|
||||
const loop = () =>
|
||||
pDelay
|
||||
.call(
|
||||
xapi.call('event.inject', 'pool', poolRef),
|
||||
10 // A small delay is required to avoid overloading the Xen API.
|
||||
)
|
||||
.then(loop)
|
||||
|
||||
return loop()
|
||||
let loop = true
|
||||
process.on('SIGINT', () => {
|
||||
loop = false
|
||||
})
|
||||
|
||||
const { pool } = xapi
|
||||
// eslint-disable-next-line no-unmodified-loop-condition
|
||||
while (loop) {
|
||||
await pool.update_other_config(
|
||||
'xo:injectEvents',
|
||||
Math.random()
|
||||
.toString(36)
|
||||
.slice(2)
|
||||
)
|
||||
await pDelay(1e2)
|
||||
}
|
||||
|
||||
await pool.update_other_config('xo:injectEvents', null)
|
||||
await xapi.disconnect()
|
||||
}
|
||||
|
||||
main(process.argv.slice(2)).catch(console.error)
|
||||
|
||||
@@ -25,5 +25,8 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"xo-common": "^0.2.0"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,6 +64,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"dist/"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"csv-parser": "^2.1.0",
|
||||
@@ -55,6 +55,7 @@
|
||||
"lint": "tslint 'src/*.ts'",
|
||||
"posttest": "yarn run lint",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"start": "node dist/index.js"
|
||||
"start": "node dist/index.js",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"jsonrpc-websocket-client": "^0.4.1",
|
||||
"jsonrpc-websocket-client": "^0.5.0",
|
||||
"lodash": "^4.17.2",
|
||||
"make-error": "^1.0.4"
|
||||
},
|
||||
@@ -49,6 +49,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,5 +41,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -55,5 +55,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server-backup-reports",
|
||||
"version": "0.15.0",
|
||||
"version": "0.16.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Backup reports plugin for XO-Server",
|
||||
"keywords": [
|
||||
@@ -36,6 +36,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.13.1",
|
||||
"moment-timezone": "^0.5.13"
|
||||
@@ -43,6 +44,8 @@
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.3",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
@@ -55,5 +58,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import humanFormat from 'human-format'
|
||||
import moment from 'moment-timezone'
|
||||
import { forEach, get, startCase } from 'lodash'
|
||||
import { forEach, groupBy, startCase } from 'lodash'
|
||||
import pkg from '../package'
|
||||
|
||||
const logger = createLogger('xo:xo-server-backup-reports')
|
||||
|
||||
export const configurationSchema = {
|
||||
type: 'object',
|
||||
|
||||
@@ -46,6 +49,9 @@ export const testSchema = {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const INDENT = ' '
|
||||
const UNKNOWN_ITEM = 'Unknown'
|
||||
|
||||
const ICON_FAILURE = '🚨'
|
||||
const ICON_INTERRUPTED = '⚠️'
|
||||
const ICON_SKIPPED = '⏩'
|
||||
@@ -60,7 +66,7 @@ const STATUS_ICON = {
|
||||
}
|
||||
|
||||
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
|
||||
const createDateFormater = timezone =>
|
||||
const createDateFormatter = timezone =>
|
||||
timezone !== undefined
|
||||
? timestamp =>
|
||||
moment(timestamp)
|
||||
@@ -86,10 +92,6 @@ const formatSpeed = (bytes, milliseconds) =>
|
||||
})
|
||||
: 'N/A'
|
||||
|
||||
const logError = e => {
|
||||
console.error('backup report error:', e)
|
||||
}
|
||||
|
||||
const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
|
||||
const NO_SUCH_OBJECT_ERROR = 'no such object'
|
||||
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
|
||||
@@ -100,40 +102,114 @@ const isSkippedError = error =>
|
||||
error.message === UNHEALTHY_VDI_CHAIN_ERROR ||
|
||||
error.message === NO_SUCH_OBJECT_ERROR
|
||||
|
||||
const INDENT = ' '
|
||||
const createGetTemporalDataMarkdown = formatDate => (
|
||||
start,
|
||||
end,
|
||||
nbIndent = 0
|
||||
) => {
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
// ===================================================================
|
||||
|
||||
const markdown = [`${indent}- **Start time**: ${formatDate(start)}`]
|
||||
const STATUS = ['failure', 'interrupted', 'skipped', 'success']
|
||||
const TITLE_BY_STATUS = {
|
||||
failure: n => `## ${n} Failure${n === 1 ? '' : 's'}`,
|
||||
interrupted: n => `## ${n} Interrupted`,
|
||||
skipped: n => `## ${n} Skipped`,
|
||||
success: n => `## ${n} Success${n === 1 ? '' : 'es'}`,
|
||||
}
|
||||
|
||||
const getTemporalDataMarkdown = (end, start, formatDate) => {
const markdown = [`- **Start time**: ${formatDate(start)}`]
if (end !== undefined) {
markdown.push(`${indent}- **End time**: ${formatDate(end)}`)
markdown.push(`- **End time**: ${formatDate(end)}`)
const duration = end - start
if (duration >= 1) {
markdown.push(`${indent}- **Duration**: ${formatDuration(duration)}`)
markdown.push(`- **Duration**: ${formatDuration(duration)}`)
}
}
return markdown
}
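
For reference, the helper returns an array of markdown bullet lines; `formatDate` is the formatter produced by `createDateFormatter`. A hypothetical call (timestamps made up for illustration) behaves roughly like this:

// Hypothetical values, for illustration only.
const start = Date.now() - 60e3
const end = Date.now()
getTemporalDataMarkdown(end, start, formatDate)
// → [
//   '- **Start time**: <formatted start>',
//   '- **End time**: <formatted end>',
//   '- **Duration**: <humanized duration>',
// ]
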
const addWarnings = (text, warnings, nbIndent = 0) => {
|
||||
if (warnings === undefined) {
|
||||
const getWarningsMarkdown = (warnings = []) =>
|
||||
warnings.map(({ message }) => `- **${ICON_WARNING} ${message}**`)
|
||||
|
||||
const getErrorMarkdown = task => {
|
||||
let message
|
||||
if (
|
||||
task.status === 'success' ||
|
||||
(message = task.result?.message ?? task.result?.code) === undefined
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const indent = INDENT.repeat(nbIndent)
|
||||
warnings.forEach(({ message }) => {
|
||||
text.push(`${indent}- **${ICON_WARNING} ${message}**`)
|
||||
})
|
||||
const label = task.status === 'skipped' ? 'Reason' : 'Error'
|
||||
return `- **${label}**: ${message}`
|
||||
}
|
||||
|
||||
const MARKDOWN_BY_TYPE = {
|
||||
pool(task, { formatDate }) {
|
||||
const { pool, poolMaster = {} } = task.data
|
||||
const name = pool.name_label || poolMaster.name_label || UNKNOWN_ITEM
|
||||
|
||||
return {
|
||||
body: [
|
||||
`- **UUID**: ${pool.uuid}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[pool] ${name}`,
|
||||
}
|
||||
},
|
||||
xo(task, { formatDate, jobName }) {
|
||||
return {
|
||||
body: [
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[XO] ${jobName}`,
|
||||
}
|
||||
},
|
||||
async remote(task, { formatDate, xo }) {
|
||||
const id = task.data.id
|
||||
const name = await xo.getRemote(id).then(
|
||||
({ name }) => name,
|
||||
error => {
|
||||
logger.warn(error)
|
||||
return UNKNOWN_ITEM
|
||||
}
|
||||
)
|
||||
return {
|
||||
body: [
|
||||
`- **ID**: ${id}`,
|
||||
...getTemporalDataMarkdown(task.end, task.start, formatDate),
|
||||
getErrorMarkdown(task),
|
||||
],
|
||||
title: `[remote] ${name}`,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
const getMarkdown = (task, props) =>
|
||||
MARKDOWN_BY_TYPE[(task.data?.type)]?.(task, props)
|
||||
|
||||
const toMarkdown = parts => {
const lines = []
let indentLevel = 0

const helper = part => {
if (typeof part === 'string') {
lines.push(`${INDENT.repeat(indentLevel)}${part}`)
} else if (Array.isArray(part)) {
++indentLevel
part.forEach(helper)
--indentLevel
}
}
helper(parts)

return lines.join('\n')
}
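
Nesting is expressed with arrays: every level of array nesting adds one more INDENT prefix to the lines it contains. A hypothetical input, for illustration only:

toMarkdown([
  '- **Remotes**',
  [
    '- **store-1** (id) ⏩',
    ['- **Start time**: …'],
  ],
])
// '- **store-1** …' comes out one INDENT deeper than '- **Remotes**',
// and '- **Start time**: …' one level deeper again.
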
// ===================================================================
|
||||
|
||||
class BackupReportsXoPlugin {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
this._report = this._wrapper.bind(this)
|
||||
this._report = this._report.bind(this)
|
||||
}
|
||||
|
||||
configure({ toMails, toXmpp }) {
|
||||
@@ -146,72 +222,174 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
test({ runId }) {
|
||||
return this._backupNgListener(undefined, undefined, undefined, runId)
|
||||
return this._report(runId, undefined, true)
|
||||
}
|
||||
|
||||
unload() {
|
||||
this._xo.removeListener('job:terminated', this._report)
|
||||
}
|
||||
|
||||
_wrapper(status, job, schedule, runJobId) {
|
||||
if (job.type === 'metadataBackup') {
|
||||
return
|
||||
}
|
||||
async _report(runJobId, { type, status } = {}, force) {
|
||||
const xo = this._xo
|
||||
try {
|
||||
if (type === 'call') {
|
||||
return this._legacyVmHandler(status)
|
||||
}
|
||||
|
||||
return new Promise(resolve =>
|
||||
resolve(
|
||||
job.type === 'backup'
|
||||
? this._backupNgListener(status, job, schedule, runJobId)
|
||||
: this._listener(status, job, schedule, runJobId)
|
||||
)
|
||||
).catch(logError)
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
}
|
||||
|
||||
const reportWhen = log.data.reportWhen
|
||||
if (
|
||||
!force &&
|
||||
(reportWhen === 'never' ||
|
||||
// Handle improper value introduced by:
|
||||
// https://github.com/vatesfr/xen-orchestra/commit/753ee994f2948bbaca9d3161eaab82329a682773#diff-9c044ab8a42ed6576ea927a64c1ec3ebR105
|
||||
reportWhen === 'Never' ||
|
||||
(reportWhen === 'failure' && log.status === 'success'))
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
const [job, schedule] = await Promise.all([
|
||||
await xo.getJob(log.jobId),
|
||||
await xo.getSchedule(log.scheduleId).catch(error => {
|
||||
logger.warn(error)
|
||||
}),
|
||||
])
|
||||
|
||||
if (job.type === 'backup') {
|
||||
return this._ngVmHandler(log, job, schedule, force)
|
||||
} else if (job.type === 'metadataBackup') {
|
||||
return this._metadataHandler(log, job, schedule, force)
|
||||
}
|
||||
|
||||
throw new Error(`Unknown backup job type: ${job.type}`)
|
||||
} catch (error) {
|
||||
logger.warn(error)
|
||||
}
|
||||
}
|
||||
|
||||
async _backupNgListener(_1, _2, schedule, runJobId) {
|
||||
async _metadataHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
const log = await xo.getBackupNgLogs(runJobId)
|
||||
if (log === undefined) {
|
||||
throw new Error(`no log found with runId=${JSON.stringify(runJobId)}`)
|
||||
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const tasksByStatus = groupBy(log.tasks, 'status')
|
||||
const n = log.tasks?.length ?? 0
|
||||
const nSuccesses = tasksByStatus.success?.length ?? 0
|
||||
|
||||
if (!force && log.data.reportWhen === 'failure') {
|
||||
delete tasksByStatus.success
|
||||
}
|
||||
|
||||
// header
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Job name**: ${jobName}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
n !== 0 && `- **Successes**: ${nSuccesses} / ${n}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
getErrorMarkdown(log),
|
||||
]
|
||||
|
||||
const nagiosText = []
|
||||
|
||||
// body
|
||||
for (const status of STATUS) {
|
||||
const tasks = tasksByStatus[status]
|
||||
if (tasks === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
// tasks header
|
||||
markdown.push('---', '', TITLE_BY_STATUS[status](tasks.length))
|
||||
|
||||
// tasks body
|
||||
for (const task of tasks) {
|
||||
const taskMarkdown = await getMarkdown(task, {
|
||||
formatDate,
|
||||
jobName: log.jobName,
|
||||
})
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const { title, body } = taskMarkdown
|
||||
const subMarkdown = [...body, ...getWarningsMarkdown(task.warnings)]
|
||||
|
||||
if (task.status !== 'success') {
|
||||
nagiosText.push(`[${task.status}] ${title}`)
|
||||
}
|
||||
|
||||
for (const subTask of task.tasks ?? []) {
|
||||
const taskMarkdown = await getMarkdown(subTask, { formatDate, xo })
|
||||
if (taskMarkdown === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTask.status]
|
||||
const { title, body } = taskMarkdown
|
||||
subMarkdown.push([
|
||||
`- **${title}** ${icon}`,
|
||||
[...body, ...getWarningsMarkdown(subTask.warnings)],
|
||||
])
|
||||
}
|
||||
markdown.push('', '', `### ${title}`, ...subMarkdown)
|
||||
}
|
||||
}
|
||||
|
||||
// footer
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${log.status} − Metadata backup report for ${
|
||||
log.jobName
|
||||
} ${STATUS_ICON[log.status]}`,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: log.status === 'success' ? 0 : 2,
|
||||
nagiosMarkdown:
|
||||
log.status === 'success'
|
||||
? `[Xen Orchestra] [Success] Metadata backup report for ${
|
||||
log.jobName
|
||||
}`
|
||||
: `[Xen Orchestra] [${log.status}] Metadata backup report for ${
|
||||
log.jobName
|
||||
} - ${nagiosText.join(' ')}`,
|
||||
})
|
||||
}
|
||||
|
||||
async _ngVmHandler(log, { name: jobName }, schedule, force) {
|
||||
const xo = this._xo
|
||||
|
||||
const { reportWhen, mode } = log.data || {}
|
||||
if (
|
||||
reportWhen === 'never' ||
|
||||
(log.status === 'success' && reportWhen === 'failure')
|
||||
) {
|
||||
return
|
||||
}
|
||||
|
||||
if (schedule === undefined) {
|
||||
schedule = await xo.getSchedule(log.scheduleId)
|
||||
}
|
||||
const formatDate = createDateFormatter(schedule?.timezone)
|
||||
|
||||
const jobName = (await xo.getJob(log.jobId, 'backup')).name
|
||||
const formatDate = createDateFormater(schedule.timezone)
|
||||
const getTemporalDataMarkdown = createGetTemporalDataMarkdown(formatDate)
|
||||
|
||||
if (
|
||||
(log.status === 'failure' || log.status === 'skipped') &&
|
||||
log.result !== undefined
|
||||
) {
|
||||
let markdown = [
|
||||
if (log.tasks === undefined) {
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
`- **Error**: ${log.result.message}`,
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
getErrorMarkdown(log),
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'---',
|
||||
'',
|
||||
`*${pkg.name} v${pkg.version}*`,
|
||||
]
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
subject: `[Xen Orchestra] ${
|
||||
log.status
|
||||
} − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
nagiosStatus: 2,
|
||||
nagiosMarkdown: `[Xen Orchestra] [${
|
||||
log.status
|
||||
@@ -231,7 +409,7 @@ class BackupReportsXoPlugin {
|
||||
let nSkipped = 0
|
||||
let nInterrupted = 0
|
||||
for (const taskLog of log.tasks) {
|
||||
if (taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
if (!force && taskLog.status === 'success' && reportWhen === 'failure') {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -244,16 +422,16 @@ class BackupReportsXoPlugin {
|
||||
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
|
||||
'',
|
||||
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
|
||||
...getTemporalDataMarkdown(taskLog.start, taskLog.end),
|
||||
...getTemporalDataMarkdown(taskLog.end, taskLog.start, formatDate),
|
||||
...getWarningsMarkdown(taskLog.warnings),
|
||||
]
|
||||
addWarnings(text, taskLog.warnings)
|
||||
|
||||
const failedSubTasks = []
|
||||
const snapshotText = []
|
||||
const srsText = []
|
||||
const remotesText = []
|
||||
|
||||
for (const subTaskLog of taskLog.tasks || []) {
|
||||
for (const subTaskLog of taskLog.tasks ?? []) {
|
||||
if (
|
||||
subTaskLog.message !== 'export' &&
|
||||
subTaskLog.message !== 'snapshot'
|
||||
@@ -262,29 +440,36 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
const icon = STATUS_ICON[subTaskLog.status]
|
||||
const errorMessage = ` - **Error**: ${get(
|
||||
subTaskLog.result,
|
||||
'message'
|
||||
)}`
|
||||
const type = subTaskLog.data?.type
|
||||
const errorMarkdown = getErrorMarkdown(subTaskLog)
|
||||
|
||||
if (subTaskLog.message === 'snapshot') {
|
||||
snapshotText.push(
|
||||
`- **Snapshot** ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 1)
|
||||
)
|
||||
} else if (subTaskLog.data.type === 'remote') {
|
||||
snapshotText.push(`- **Snapshot** ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
])
|
||||
} else if (type === 'remote') {
|
||||
const id = subTaskLog.data.id
|
||||
const remote = await xo.getRemote(id).catch(() => {})
|
||||
remotesText.push(
|
||||
` - **${
|
||||
remote !== undefined ? remote.name : `Remote Not found`
|
||||
}** (${id}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(remotesText, subTaskLog.warnings, 2)
|
||||
const remote = await xo.getRemote(id).catch(error => {
|
||||
logger.warn(error)
|
||||
})
|
||||
const title = remote !== undefined ? remote.name : `Remote Not found`
|
||||
|
||||
remotesText.push(`- **${title}** (${id}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(remote !== undefined ? remote.name : id)
|
||||
remotesText.push('', errorMessage)
|
||||
}
|
||||
} else {
|
||||
const id = subTaskLog.data.id
|
||||
@@ -294,14 +479,17 @@ class BackupReportsXoPlugin {
|
||||
} catch (e) {}
|
||||
const [srName, srUuid] =
|
||||
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
|
||||
srsText.push(
|
||||
` - **${srName}** (${srUuid}) ${icon}`,
|
||||
...getTemporalDataMarkdown(subTaskLog.start, subTaskLog.end, 2)
|
||||
)
|
||||
addWarnings(srsText, subTaskLog.warnings, 2)
|
||||
srsText.push(`- **${srName}** (${srUuid}) ${icon}`, [
|
||||
...getTemporalDataMarkdown(
|
||||
subTaskLog.end,
|
||||
subTaskLog.start,
|
||||
formatDate
|
||||
),
|
||||
...getWarningsMarkdown(subTaskLog.warnings),
|
||||
errorMarkdown,
|
||||
])
|
||||
if (subTaskLog.status === 'failure') {
|
||||
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
|
||||
srsText.push('', errorMessage)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -313,53 +501,48 @@ class BackupReportsXoPlugin {
|
||||
return
|
||||
}
|
||||
|
||||
const operationInfoText = []
|
||||
addWarnings(operationInfoText, operationLog.warnings, 3)
|
||||
if (operationLog.status === 'success') {
|
||||
const size = operationLog.result.size
|
||||
const size = operationLog.result?.size
|
||||
if (size > 0) {
|
||||
if (operationLog.message === 'merge') {
|
||||
globalMergeSize += size
|
||||
} else {
|
||||
globalTransferSize += size
|
||||
}
|
||||
}
|
||||
|
||||
operationInfoText.push(
|
||||
` - **Size**: ${formatSize(size)}`,
|
||||
` - **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`
|
||||
)
|
||||
} else if (get(operationLog.result, 'message') !== undefined) {
|
||||
operationInfoText.push(
|
||||
` - **Error**: ${get(operationLog.result, 'message')}`
|
||||
)
|
||||
}
|
||||
const operationText = [
|
||||
` - **${operationLog.message}** ${
|
||||
STATUS_ICON[operationLog.status]
|
||||
}`,
|
||||
...getTemporalDataMarkdown(operationLog.start, operationLog.end, 3),
|
||||
...operationInfoText,
|
||||
].join('\n')
|
||||
if (get(subTaskLog, 'data.type') === 'remote') {
|
||||
`- **${operationLog.message}** ${STATUS_ICON[operationLog.status]}`,
|
||||
[
|
||||
...getTemporalDataMarkdown(
|
||||
operationLog.end,
|
||||
operationLog.start,
|
||||
formatDate
|
||||
),
|
||||
size > 0 && `- **Size**: ${formatSize(size)}`,
|
||||
size > 0 &&
|
||||
`- **Speed**: ${formatSpeed(
|
||||
size,
|
||||
operationLog.end - operationLog.start
|
||||
)}`,
|
||||
...getWarningsMarkdown(operationLog.warnings),
|
||||
getErrorMarkdown(operationLog),
|
||||
],
|
||||
]
|
||||
if (type === 'remote') {
|
||||
remotesText.push(operationText)
|
||||
remotesText.join('\n')
|
||||
}
|
||||
if (get(subTaskLog, 'data.type') === 'SR') {
|
||||
} else if (type === 'SR') {
|
||||
srsText.push(operationText)
|
||||
srsText.join('\n')
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if (srsText.length !== 0) {
|
||||
srsText.unshift(`- **SRs**`)
|
||||
}
|
||||
if (remotesText.length !== 0) {
|
||||
remotesText.unshift(`- **Remotes**`)
|
||||
}
|
||||
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
|
||||
const subText = [
|
||||
...snapshotText,
|
||||
srsText.length !== 0 && `- **SRs**`,
|
||||
srsText,
|
||||
remotesText.length !== 0 && `- **Remotes**`,
|
||||
remotesText,
|
||||
]
|
||||
if (taskLog.result !== undefined) {
|
||||
if (taskLog.status === 'skipped') {
|
||||
++nSkipped
|
||||
@@ -369,8 +552,7 @@ class BackupReportsXoPlugin {
|
||||
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
|
||||
? UNHEALTHY_VDI_CHAIN_MESSAGE
|
||||
: taskLog.result.message
|
||||
}`,
|
||||
''
|
||||
}`
|
||||
)
|
||||
nagiosText.push(
|
||||
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -379,11 +561,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else {
|
||||
++nFailures
|
||||
failedVmsText.push(
|
||||
...text,
|
||||
`- **Error**: ${taskLog.result.message}`,
|
||||
''
|
||||
)
|
||||
failedVmsText.push(...text, `- **Error**: ${taskLog.result.message}`)
|
||||
|
||||
nagiosText.push(
|
||||
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
|
||||
@@ -394,7 +572,7 @@ class BackupReportsXoPlugin {
|
||||
} else {
|
||||
if (taskLog.status === 'failure') {
|
||||
++nFailures
|
||||
failedVmsText.push(...text, '', '', ...subText, '')
|
||||
failedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[${
|
||||
vm !== undefined ? vm.name_label : 'undefined'
|
||||
@@ -402,37 +580,34 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
} else if (taskLog.status === 'interrupted') {
|
||||
++nInterrupted
|
||||
interruptedVmsText.push(...text, '', '', ...subText, '')
|
||||
interruptedVmsText.push(...text, ...subText)
|
||||
nagiosText.push(
|
||||
`[(Interrupted) ${vm !== undefined ? vm.name_label : 'undefined'}]`
|
||||
)
|
||||
} else {
|
||||
successfulVmsText.push(...text, '', '', ...subText, '')
|
||||
successfulVmsText.push(...text, ...subText)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const nVms = log.tasks.length
|
||||
const nSuccesses = nVms - nFailures - nSkipped - nInterrupted
|
||||
let markdown = [
|
||||
const markdown = [
|
||||
`## Global status: ${log.status}`,
|
||||
'',
|
||||
`- **Job ID**: ${log.jobId}`,
|
||||
`- **Run ID**: ${runJobId}`,
|
||||
`- **Run ID**: ${log.id}`,
|
||||
`- **mode**: ${mode}`,
|
||||
...getTemporalDataMarkdown(log.start, log.end),
|
||||
...getTemporalDataMarkdown(log.end, log.start, formatDate),
|
||||
`- **Successes**: ${nSuccesses} / ${nVms}`,
|
||||
globalTransferSize !== 0 &&
|
||||
`- **Transfer size**: ${formatSize(globalTransferSize)}`,
|
||||
globalMergeSize !== 0 &&
|
||||
`- **Merge size**: ${formatSize(globalMergeSize)}`,
|
||||
...getWarningsMarkdown(log.warnings),
|
||||
'',
|
||||
]
|
||||
|
||||
if (globalTransferSize !== 0) {
|
||||
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
|
||||
}
|
||||
if (globalMergeSize !== 0) {
|
||||
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
|
||||
}
|
||||
addWarnings(markdown, log.warnings)
|
||||
markdown.push('')
|
||||
|
||||
if (nFailures !== 0) {
|
||||
markdown.push(
|
||||
'---',
|
||||
@@ -457,7 +632,7 @@ class BackupReportsXoPlugin {
|
||||
)
|
||||
}
|
||||
|
||||
if (nSuccesses !== 0 && reportWhen !== 'failure') {
|
||||
if (nSuccesses !== 0 && (force || reportWhen !== 'failure')) {
|
||||
markdown.push(
|
||||
'---',
|
||||
'',
|
||||
@@ -468,9 +643,8 @@ class BackupReportsXoPlugin {
|
||||
}
|
||||
|
||||
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
|
||||
markdown = markdown.join('\n')
|
||||
return this._sendReport({
|
||||
markdown,
|
||||
markdown: toMarkdown(markdown),
|
||||
subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
|
||||
STATUS_ICON[log.status]
|
||||
}`,
|
||||
@@ -510,9 +684,9 @@ class BackupReportsXoPlugin {
|
||||
])
|
||||
}
|
||||
|
||||
_listener(status) {
|
||||
_legacyVmHandler(status) {
|
||||
const { calls, timezone, error } = status
|
||||
const formatDate = createDateFormater(timezone)
|
||||
const formatDate = createDateFormatter(timezone)
|
||||
|
||||
if (status.error !== undefined) {
|
||||
const [globalStatus, icon] =
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"http-request-plus": "^0.8.0",
|
||||
"jsonrpc-websocket-client": "^0.4.1"
|
||||
"jsonrpc-websocket-client": "^0.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -44,5 +44,6 @@
|
||||
"build": "NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "NODE_DEV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -42,5 +42,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"nodemailer": "^5.0.0",
|
||||
"nodemailer": "^6.1.0",
|
||||
"nodemailer-markdown": "^1.0.1",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
},
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,5 +49,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -50,5 +50,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/log": "^0.1.4",
|
||||
"handlebars": "^4.0.6",
|
||||
"html-minifier": "^3.5.8",
|
||||
"html-minifier": "^4.0.0",
|
||||
"human-format": "^0.10.0",
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.12.1"
|
||||
@@ -59,5 +59,6 @@
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
}
|
||||
},
|
||||
"private": true
|
||||
}
|
||||
|
||||
@@ -49,6 +49,12 @@ maxTokenValidity = '0.5 year'
|
||||
# Delay for which backups listing on a remote is cached
|
||||
listingDebounce = '1 min'
|
||||
|
||||
# Helmet handles HTTP security via headers
|
||||
#
|
||||
# https://helmetjs.github.io/docs/
|
||||
#[http.helmet.hsts]
|
||||
#includeSubDomains = false
|
||||
|
||||
[[http.listen]]
|
||||
port = 80
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
{
|
||||
"private": true,
|
||||
"name": "xo-server",
|
||||
"version": "5.38.0",
|
||||
"version": "5.40.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Server part of Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -91,6 +92,7 @@
|
||||
"make-error": "^1",
|
||||
"micromatch": "^3.1.4",
|
||||
"minimist": "^1.2.0",
|
||||
"mnemonist": "^0.27.2",
|
||||
"moment-timezone": "^0.5.14",
|
||||
"ms": "^2.1.1",
|
||||
"multikey-hash": "^1.0.4",
|
||||
@@ -109,7 +111,7 @@
|
||||
"readable-stream": "^3.2.0",
|
||||
"redis": "^2.8.0",
|
||||
"schema-inspector": "^1.6.8",
|
||||
"semver": "^5.4.1",
|
||||
"semver": "^6.0.0",
|
||||
"serve-static": "^1.13.1",
|
||||
"split-lines": "^2.0.0",
|
||||
"stack-chain": "^2.0.0",
|
||||
@@ -117,18 +119,18 @@
|
||||
"struct-fu": "^1.2.0",
|
||||
"tar-stream": "^2.0.1",
|
||||
"through2": "^3.0.0",
|
||||
"tmp": "^0.0.33",
|
||||
"tmp": "^0.1.0",
|
||||
"uuid": "^3.0.1",
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.6.0",
|
||||
"vhd-lib": "^0.6.1",
|
||||
"ws": "^6.0.0",
|
||||
"xen-api": "^0.24.6",
|
||||
"xen-api": "^0.25.1",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.4.1",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-remote-parser": "^0.5.0",
|
||||
"xo-vmdk-to-vhd": "^0.1.6",
|
||||
"xo-vmdk-to-vhd": "^0.1.7",
|
||||
"yazl": "^2.4.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
packages/xo-server/src/_XenStore.js (new file, +5)
@@ -0,0 +1,5 @@
import fromCallback from 'promise-toolbox/fromCallback'
import { execFile } from 'child_process'

export const read = key =>
  fromCallback(cb => execFile('xenstore-read', [key], cb))
|
||||
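For context, a minimal sketch (not part of the diff) of how this helper is consumed by later hunks in this changeset: both the admin-account and xen-servers reads fetch a JSON document from the VM's XenStore and fall back to a default when the key is absent or unparsable.

import * as XenStore from '../_XenStore'

// Illustrative only — the key name mirrors the users mixin hunk below,
// the value is whatever the host wrote into the VM's XenStore.
const readJson = (key, defaultValue) =>
  XenStore.read(key)
    .then(JSON.parse)
    .catch(() => defaultValue)

readJson('vm-data/admin-account', {}).then(
  ({ email = 'admin@admin.net', password = 'admin' }) => {
    // default credentials for the first user, as in the users mixin hunk
  }
)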
@@ -1,6 +1,6 @@
import createLogger from '@xen-orchestra/log'
import pump from 'pump'
import { format } from 'json-rpc-peer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { noSuchObject } from 'xo-common/api-errors'

import { parseSize } from '../utils'
@@ -128,7 +128,7 @@ async function handleImportContent(req, res, { xapi, id }) {
    res.end(format.response(0, true))
  } catch (e) {
    res.writeHead(500)
    res.end(format.error(0, new Error(e.message)))
    res.end(format.error(0, new JsonRpcError(e.message)))
  }
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
import { format } from 'json-rpc-peer'
import { format, JsonRpcError } from 'json-rpc-peer'

// ===================================================================

@@ -248,7 +248,7 @@ async function handleInstallSupplementalPack(req, res, { hostId }) {
    res.end(format.response(0))
  } catch (e) {
    res.writeHead(500)
    res.end(format.error(0, new Error(e.message)))
    res.end(format.error(0, new JsonRpcError(e.message)))
  }
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
import { format } from 'json-rpc-peer'
import { format, JsonRpcError } from 'json-rpc-peer'

// ===================================================================

@@ -234,7 +234,7 @@ async function handleInstallSupplementalPack(req, res, { poolId }) {
    res.end(format.response(0))
  } catch (e) {
    res.writeHead(500)
    res.end(format.error(0, new Error(e.message)))
    res.end(format.error(0, new JsonRpcError(e.message)))
  }
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
import defer from 'golike-defer'
import { format } from 'json-rpc-peer'
import { format, JsonRpcError } from 'json-rpc-peer'
import { ignoreErrors } from 'promise-toolbox'
import { assignWith, concat } from 'lodash'
import {
@@ -193,6 +193,11 @@ create.params = {
    optional: true,
  },

  networkConfig: {
    type: 'string',
    optional: true,
  },

  coreOs: {
    type: 'boolean',
    optional: true,
@@ -1198,7 +1203,7 @@ async function handleVmImport(req, res, { data, srId, type, xapi }) {
    res.end(format.response(0, vm.$id))
  } catch (e) {
    res.writeHead(500)
    res.end(format.error(0, new Error(e.message)))
    res.end(format.error(0, new JsonRpcError(e.message)))
  }
}
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ async function loadConfiguration() {
function createExpressApp(config) {
  const app = createExpress()

  app.use(helmet())
  app.use(helmet(config.http.helmet))

  app.use(compression())

@@ -417,6 +417,7 @@ const setUpProxies = (express, opts, xo) => {
  }

  const proxy = createProxyServer({
    changeOrigin: true,
    ignorePath: true,
  }).on('error', error => console.error(error))
||||
|
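A minimal illustration (not part of the diff) of what the helmet passthrough above enables: anything placed under [http.helmet.*] in the xo-server configuration — such as the commented [http.helmet.hsts] block shown earlier — is forwarded verbatim to helmet(), so a config setting includeSubDomains = false under that section is roughly equivalent to:

import helmet from 'helmet'

// helmet() accepts per-middleware options; hsts is one of its documented sections
app.use(helmet({ hsts: { includeSubDomains: false } }))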
||||
@@ -2,6 +2,8 @@ import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'

import { parseProp } from './utils'

// ===================================================================

export default class Remote extends Model {}
@@ -14,12 +16,21 @@ export class Remotes extends Collection {
  async get(properties) {
    const remotes = await super.get(properties)
    forEach(remotes, remote => {
      remote.benchmarks =
        remote.benchmarks !== undefined
          ? JSON.parse(remote.benchmarks)
          : undefined
      remote.benchmarks = parseProp('remote', remote, 'benchmarks')
      remote.enabled = remote.enabled === 'true'
    })
    return remotes
  }

  _update(remotes) {
    return super._update(
      remotes.map(remote => {
        const { benchmarks } = remote
        if (benchmarks !== undefined) {
          remote.benchmarks = JSON.stringify(benchmarks)
        }
        return remote
      })
    )
  }
}
|
||||
|
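parseProp is imported from './utils' and its implementation is not shown in this diff; judging from its use above, it presumably behaves like a non-throwing JSON.parse with a default value, along the lines of this illustrative sketch:

const parseProp = (type, obj, prop, defaultValue) => {
  const value = obj[prop]
  if (value === undefined) {
    return defaultValue
  }
  try {
    return JSON.parse(value)
  } catch (error) {
    console.warn('cannot parse %s.%s of %s:', type, prop, obj.id, error)
    return defaultValue
  }
}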
||||
@@ -633,7 +633,7 @@ const TRANSFORMS = {
      description: poolPatch.name_description,
      name: poolPatch.name_label,
      pool_patch: poolPatch.$ref,
      size: poolPatch.size,
      size: +poolPatch.size,
      guidance: poolPatch.after_apply_guidance,
      time: toTimestamp(obj.timestamp_applied),
|
||||
|
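The only change in this transform is the unary plus, which coerces the patch size (presumably delivered as a string by the JSON-RPC transport) into a number before it is exposed to clients, e.g.:

// illustrative
+'4194304' === 4194304 // true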
||||
|
||||
@@ -1,3 +1,4 @@
/* eslint eslint-comments/disable-enable-pair: [error, {allowWholeFile: true}] */
/* eslint-disable camelcase */
import asyncMap from '@xen-orchestra/async-map'
import concurrency from 'limit-concurrency-decorator'
@@ -869,7 +870,13 @@ export default class Xapi extends XapiBase {
  _assertHealthyVdiChains(vm) {
    const cache = { __proto__: null }
    forEach(vm.$VBDs, ({ $VDI }) => {
      this._assertHealthyVdiChain($VDI, cache)
      try {
        this._assertHealthyVdiChain($VDI, cache)
      } catch (error) {
        error.VDI = $VDI
        error.VM = vm
        throw error
      }
    })
  }
|
||||
|
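A brief, hypothetical illustration (not from the diff) of why the VDI and VM records are attached to the rethrown error: callers can now report which object broke the chain instead of only the bare error message.

try {
  xapi._assertHealthyVdiChains(vm)
} catch (error) {
  // error.VM and error.VDI are the XAPI records attached above
  console.error(
    `unhealthy VDI chain on VM ${error.VM.name_label} (VDI ${error.VDI.uuid})`,
    error.message
  )
}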
||||
|
||||
@@ -52,6 +52,7 @@ export default {

    coreOs = false,
    cloudConfig = undefined,
    networkConfig = undefined,

    vgpuType = undefined,
    gpuGroup = undefined,
@@ -241,10 +242,16 @@
        }
      })

      const method = coreOs
        ? 'createCoreOsCloudInitConfigDrive'
        : 'createCloudInitConfigDrive'
      await this[method](vm.$id, srRef, cloudConfig)
      if (coreOs) {
        await this.createCoreOsCloudInitConfigDrive(vm.$id, srRef, cloudConfig)
      } else {
        await this.createCloudInitConfigDrive(
          vm.$id,
          srRef,
          cloudConfig,
          networkConfig
        )
      }
    }

    // wait for the record with all the VBDs and VIFs
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import * as MultiCounter from '@xen-orchestra/multi-counter'
|
||||
import aclResolver from 'xo-acl-resolver'
|
||||
import { forEach, includes, map } from 'lodash'
|
||||
|
||||
@@ -6,9 +7,11 @@ import { Acls } from '../models/acl'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// TODO add cache per user
|
||||
export default class {
|
||||
constructor(xo) {
|
||||
this._xo = xo
|
||||
this._cacheByObjectByUser = { __proto__: null }
|
||||
|
||||
const aclsDb = (this._acls = new Acls({
|
||||
connection: xo._redis,
|
||||
@@ -16,6 +19,20 @@ export default class {
|
||||
indexes: ['subject', 'object'],
|
||||
}))
|
||||
|
||||
const nSessionsByUser = MultiCounter.create()
|
||||
xo.on('session.open', session => {
|
||||
const userId = session.get('user_id')
|
||||
if (nSessionsByUser[userId]++ === 0) {
|
||||
this._cacheByObjectByUser[userId] = { __proto__: null }
|
||||
}
|
||||
})
|
||||
xo.on('session.close', session => {
|
||||
const userId = session.get('user_id')
|
||||
if (--nSessionsByUser[userId] === 0) {
|
||||
delete this._cacheByObjectByUser[userId]
|
||||
}
|
||||
})
|
||||
|
||||
xo.on('start', () => {
|
||||
xo.addConfigManager(
|
||||
'acls',
|
||||
|
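The @xen-orchestra/multi-counter package is not included in this changeset; from the way it is used above (numeric increment/decrement with an implicit zero default per key), a minimal stand-in could look like this hypothetical sketch:

// Hypothetical equivalent of MultiCounter.create(): per-key counters that
// start at 0, so `counter[key]++ === 0` detects the first increment.
const create = () =>
  new Proxy(
    { __proto__: null },
    { get: (counters, key) => (key in counters ? counters[key] : 0) }
  )

const nSessionsByUser = create()
nSessionsByUser.alice++ // evaluates to 0: first session, create the per-user cache
--nSessionsByUser.alice // evaluates to 0: last session closed, drop the cache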
||||
@@ -164,11 +164,11 @@ export default class Jobs {
|
||||
|
||||
xo.emit(
|
||||
'job:terminated',
|
||||
undefined,
|
||||
job,
|
||||
undefined,
|
||||
// This cast can be removed after merging the PR: https://github.com/vatesfr/xen-orchestra/pull/3209
|
||||
String(job.runId)
|
||||
String(job.runId),
|
||||
{
|
||||
type: job.type,
|
||||
}
|
||||
)
|
||||
return this.updateJob({ id: job.id, runId: null })
|
||||
})
|
||||
@@ -266,6 +266,11 @@ export default class Jobs {
|
||||
reportWhen: (settings && settings.reportWhen) || 'failure',
|
||||
}
|
||||
}
|
||||
if (type === 'metadataBackup') {
|
||||
data = {
|
||||
reportWhen: job.settings['']?.reportWhen ?? 'failure',
|
||||
}
|
||||
}
|
||||
|
||||
const logger = this._logger
|
||||
const runJobId = logger.notice(`Starting execution of ${id}.`, {
|
||||
@@ -314,7 +319,10 @@ export default class Jobs {
|
||||
true
|
||||
)
|
||||
|
||||
app.emit('job:terminated', status, job, schedule, runJobId)
|
||||
app.emit('job:terminated', runJobId, {
|
||||
type: job.type,
|
||||
status,
|
||||
})
|
||||
} catch (error) {
|
||||
await logger.error(
|
||||
`The execution of ${id} has failed.`,
|
||||
@@ -325,7 +333,9 @@ export default class Jobs {
|
||||
},
|
||||
true
|
||||
)
|
||||
app.emit('job:terminated', undefined, job, schedule, runJobId)
|
||||
app.emit('job:terminated', runJobId, {
|
||||
type: job.type,
|
||||
})
|
||||
throw error
|
||||
} finally {
|
||||
this.updateJob({ id, runId: null })::ignoreErrors()
|
||||
|
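In the executor hunks above, 'job:terminated' is now emitted with the run id first, followed by a small data object, instead of the previous (status, job, schedule, runJobId) positional form. A hypothetical listener written against that new shape (note that other emit sites in this changeset still pass the older positional arguments):

app.on('job:terminated', (runJobId, { type, status }) => {
  // `status` is only provided on successful terminations in the code above
  console.log('job run %s (%s) terminated', runJobId, type, status)
})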
||||
@@ -1,7 +1,6 @@
|
||||
// @flow
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import defer from 'golike-defer'
|
||||
import { fromEvent, ignoreErrors } from 'promise-toolbox'
|
||||
|
||||
import debounceWithKey from '../_pDebounceWithKey'
|
||||
@@ -25,9 +24,14 @@ const METADATA_BACKUP_JOB_TYPE = 'metadataBackup'
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
|
||||
const DEFAULT_RETENTION = 0
|
||||
|
||||
type ReportWhen = 'always' | 'failure' | 'never'
|
||||
|
||||
type Settings = {|
|
||||
retentionXoMetadata?: number,
|
||||
reportWhen?: ReportWhen,
|
||||
retentionPoolMetadata?: number,
|
||||
retentionXoMetadata?: number,
|
||||
|}
|
||||
|
||||
type MetadataBackupJob = {
|
||||
@@ -47,6 +51,22 @@ const createSafeReaddir = (handler, methodName) => (path, options) =>
|
||||
return []
|
||||
})
|
||||
|
||||
const deleteOldBackups = (handler, dir, retention, handleError) =>
|
||||
handler.list(dir).then(list => {
|
||||
list.sort()
|
||||
list = list
|
||||
.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp))
|
||||
.slice(0, -retention)
|
||||
return Promise.all(
|
||||
list.map(timestamp => {
|
||||
const backupDir = `${dir}/${timestamp}`
|
||||
return handler
|
||||
.rmtree(backupDir)
|
||||
.catch(error => handleError(error, backupDir))
|
||||
})
|
||||
)
|
||||
}, handleError)
|
||||
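Behaviour of the retention logic above, illustrated: entries are sorted lexicographically (which is chronological for the <YYYYMMDD>T<HHmmss>Z format), non-matching names are ignored, and everything except the `retention` most recent directories is removed.

// e.g. with retention = 2
const toDelete = ['foo', '20190301T120000Z', '20190401T120000Z', '20190501T120000Z']
  .sort()
  .filter(t => /^\d{8}T\d{6}Z$/.test(t))
  .slice(0, -2)
// toDelete === ['20190301T120000Z'] — the only directory passed to rmtree()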
|
||||
// metadata.json
|
||||
//
|
||||
// {
|
||||
@@ -73,6 +93,19 @@ const createSafeReaddir = (handler, methodName) => (path, options) =>
|
||||
// └─ <YYYYMMDD>T<HHmmss>
|
||||
// ├─ metadata.json
|
||||
// └─ data
|
||||
//
|
||||
// Task logs emitted in a metadata backup execution:
|
||||
//
|
||||
// job.start(data: { reportWhen: ReportWhen })
|
||||
// ├─ task.start(data: { type: 'pool', id: string, pool: <Pool />, poolMaster: <Host /> })
|
||||
// │ ├─ task.start(data: { type: 'remote', id: string })
|
||||
// │ │ └─ task.end
|
||||
// │ └─ task.end
|
||||
// ├─ task.start(data: { type: 'xo' })
|
||||
// │ ├─ task.start(data: { type: 'remote', id: string })
|
||||
// │ │ └─ task.end
|
||||
// │ └─ task.end
|
||||
// └─ job.end
|
||||
export default class metadataBackup {
|
||||
_app: {
|
||||
createJob: (
|
||||
@@ -123,7 +156,293 @@ export default class metadataBackup {
|
||||
})
|
||||
}
|
||||
|
||||
async _executor({ cancelToken, job: job_, schedule }): Executor {
|
||||
async _backupXo({ handlers, job, logger, retention, runJobId, schedule }) {
|
||||
const app = this._app
|
||||
|
||||
const timestamp = Date.now()
|
||||
const taskId = logger.notice(`Starting XO metadata backup. (${job.id})`, {
|
||||
data: {
|
||||
type: 'xo',
|
||||
},
|
||||
event: 'task.start',
|
||||
parentId: runJobId,
|
||||
})
|
||||
|
||||
try {
|
||||
const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
|
||||
const dir = `${scheduleDir}/${safeDateFormat(timestamp)}`
|
||||
|
||||
const data = JSON.stringify(await app.exportConfig(), null, 2)
|
||||
const fileName = `${dir}/data.json`
|
||||
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
await asyncMap(handlers, async (handler, remoteId) => {
|
||||
const subTaskId = logger.notice(
|
||||
`Starting XO metadata backup for the remote (${remoteId}). (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
data: {
|
||||
id: remoteId,
|
||||
type: 'remote',
|
||||
},
|
||||
event: 'task.start',
|
||||
parentId: taskId,
|
||||
}
|
||||
)
|
||||
|
||||
try {
|
||||
await Promise.all([
|
||||
handler.outputFile(fileName, data),
|
||||
handler.outputFile(metaDataFileName, metadata),
|
||||
])
|
||||
|
||||
await deleteOldBackups(
|
||||
handler,
|
||||
scheduleDir,
|
||||
retention,
|
||||
(error, backupDir) => {
|
||||
logger.warning(
|
||||
backupDir !== undefined
|
||||
? `unable to delete the folder ${backupDir}`
|
||||
: `unable to list backups for the remote (${remoteId})`,
|
||||
{
|
||||
event: 'task.warning',
|
||||
taskId: subTaskId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
logger.notice(
|
||||
`Backuping XO metadata for the remote (${remoteId}) is a success. (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
status: 'success',
|
||||
taskId: subTaskId,
|
||||
}
|
||||
)
|
||||
} catch (error) {
|
||||
await handler.rmtree(dir).catch(error => {
|
||||
logger.warning(`unable to delete the folder ${dir}`, {
|
||||
event: 'task.warning',
|
||||
taskId: subTaskId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
logger.error(
|
||||
`Backuping XO metadata for the remote (${remoteId}) has failed. (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
result: serializeError(error),
|
||||
status: 'failure',
|
||||
taskId: subTaskId,
|
||||
}
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
logger.notice(`Backuping XO metadata is a success. (${job.id})`, {
|
||||
event: 'task.end',
|
||||
status: 'success',
|
||||
taskId,
|
||||
})
|
||||
} catch (error) {
|
||||
logger.error(`Backuping XO metadata has failed. (${job.id})`, {
|
||||
event: 'task.end',
|
||||
result: serializeError(error),
|
||||
status: 'failure',
|
||||
taskId,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async _backupPool(
|
||||
poolId,
|
||||
{ cancelToken, handlers, job, logger, retention, runJobId, schedule, xapi }
|
||||
) {
|
||||
const poolMaster = await xapi
|
||||
.getRecord('host', xapi.pool.master)
|
||||
::ignoreErrors()
|
||||
const timestamp = Date.now()
|
||||
const taskId = logger.notice(
|
||||
`Starting metadata backup for the pool (${poolId}). (${job.id})`,
|
||||
{
|
||||
data: {
|
||||
id: poolId,
|
||||
pool: xapi.pool,
|
||||
poolMaster,
|
||||
type: 'pool',
|
||||
},
|
||||
event: 'task.start',
|
||||
parentId: runJobId,
|
||||
}
|
||||
)
|
||||
|
||||
try {
|
||||
const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${poolId}`
|
||||
const dir = `${poolDir}/${safeDateFormat(timestamp)}`
|
||||
|
||||
// TODO: export the metadata only once then split the stream between remotes
|
||||
const stream = await xapi.exportPoolMetadata(cancelToken)
|
||||
const fileName = `${dir}/data`
|
||||
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
pool: xapi.pool,
|
||||
poolMaster,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
await asyncMap(handlers, async (handler, remoteId) => {
|
||||
const subTaskId = logger.notice(
|
||||
`Starting metadata backup for the pool (${poolId}) for the remote (${remoteId}). (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
data: {
|
||||
id: remoteId,
|
||||
type: 'remote',
|
||||
},
|
||||
event: 'task.start',
|
||||
parentId: taskId,
|
||||
}
|
||||
)
|
||||
|
||||
let outputStream
|
||||
try {
|
||||
await Promise.all([
|
||||
(async () => {
|
||||
outputStream = await handler.createOutputStream(fileName)
|
||||
|
||||
// 'readable-stream/pipeline' does not call the callback when an error is
// thrown from the readable stream
|
||||
stream.pipe(outputStream)
|
||||
return fromEvent(stream, 'end').catch(error => {
|
||||
if (error.message !== 'aborted') {
|
||||
throw error
|
||||
}
|
||||
})
|
||||
})(),
|
||||
handler.outputFile(metaDataFileName, metadata),
|
||||
])
|
||||
|
||||
await deleteOldBackups(
|
||||
handler,
|
||||
poolDir,
|
||||
retention,
|
||||
(error, backupDir) => {
|
||||
logger.warning(
|
||||
backupDir !== undefined
|
||||
? `unable to delete the folder ${backupDir}`
|
||||
: `unable to list backups for the remote (${remoteId})`,
|
||||
{
|
||||
event: 'task.warning',
|
||||
taskId: subTaskId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
logger.notice(
|
||||
`Backuping pool metadata (${poolId}) for the remote (${remoteId}) is a success. (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
status: 'success',
|
||||
taskId: subTaskId,
|
||||
}
|
||||
)
|
||||
} catch (error) {
|
||||
if (outputStream !== undefined) {
|
||||
outputStream.destroy()
|
||||
}
|
||||
await handler.rmtree(dir).catch(error => {
|
||||
logger.warning(`unable to delete the folder ${dir}`, {
|
||||
event: 'task.warning',
|
||||
taskId: subTaskId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
logger.error(
|
||||
`Backuping pool metadata (${poolId}) for the remote (${remoteId}) has failed. (${
|
||||
job.id
|
||||
})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
result: serializeError(error),
|
||||
status: 'failure',
|
||||
taskId: subTaskId,
|
||||
}
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
logger.notice(
|
||||
`Backuping pool metadata (${poolId}) is a success. (${job.id})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
status: 'success',
|
||||
taskId,
|
||||
}
|
||||
)
|
||||
} catch (error) {
|
||||
logger.error(
|
||||
`Backuping pool metadata (${poolId}) has failed. (${job.id})`,
|
||||
{
|
||||
event: 'task.end',
|
||||
result: serializeError(error),
|
||||
status: 'failure',
|
||||
taskId,
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async _executor({
|
||||
cancelToken,
|
||||
job: job_,
|
||||
logger,
|
||||
runJobId,
|
||||
schedule,
|
||||
}): Executor {
|
||||
if (schedule === undefined) {
|
||||
throw new Error('backup job cannot run without a schedule')
|
||||
}
|
||||
@@ -140,133 +459,103 @@ export default class metadataBackup {
|
||||
throw new Error('no metadata mode found')
|
||||
}
|
||||
|
||||
const app = this._app
|
||||
const { retentionXoMetadata, retentionPoolMetadata } =
|
||||
job?.settings[schedule.id] || {}
|
||||
let { retentionXoMetadata, retentionPoolMetadata } =
|
||||
job.settings[schedule.id] || {}
|
||||
|
||||
const timestamp = Date.now()
|
||||
const formattedTimestamp = safeDateFormat(timestamp)
|
||||
const commonMetadata = {
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
// it also replaces null retentions introduced by the commit
|
||||
// https://github.com/vatesfr/xen-orchestra/commit/fea5117ed83b58d3a57715b32d63d46e3004a094#diff-c02703199db2a4c217943cf8e02b91deR40
|
||||
if (retentionXoMetadata == null) {
|
||||
retentionXoMetadata = DEFAULT_RETENTION
|
||||
}
|
||||
if (retentionPoolMetadata == null) {
|
||||
retentionPoolMetadata = DEFAULT_RETENTION
|
||||
}
|
||||
|
||||
const files = []
|
||||
if (job.xoMetadata && retentionXoMetadata > 0) {
|
||||
const xoMetadataDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
|
||||
const dir = `${xoMetadataDir}/${formattedTimestamp}`
|
||||
|
||||
const data = JSON.stringify(await app.exportConfig(), null, 2)
|
||||
const fileName = `${dir}/data.json`
|
||||
|
||||
const metadata = JSON.stringify(commonMetadata, null, 2)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
files.push({
|
||||
executeBackup: defer(($defer, handler) => {
|
||||
$defer.onFailure(() => handler.rmtree(dir))
|
||||
return Promise.all([
|
||||
handler.outputFile(fileName, data),
|
||||
handler.outputFile(metaDataFileName, metadata),
|
||||
])
|
||||
}),
|
||||
dir: xoMetadataDir,
|
||||
retention: retentionXoMetadata,
|
||||
})
|
||||
}
|
||||
if (!isEmptyPools && retentionPoolMetadata > 0) {
|
||||
files.push(
|
||||
...(await Promise.all(
|
||||
poolIds.map(async id => {
|
||||
const poolMetadataDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${
|
||||
schedule.id
|
||||
}/${id}`
|
||||
const dir = `${poolMetadataDir}/${formattedTimestamp}`
|
||||
|
||||
// TODO: export the metadata only once then split the stream between remotes
|
||||
const stream = await app.getXapi(id).exportPoolMetadata(cancelToken)
|
||||
const fileName = `${dir}/data`
|
||||
|
||||
const xapi = this._app.getXapi(id)
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
...commonMetadata,
|
||||
pool: xapi.pool,
|
||||
poolMaster: await xapi.getRecord('host', xapi.pool.master),
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
return {
|
||||
executeBackup: defer(($defer, handler) => {
|
||||
$defer.onFailure(() => handler.rmtree(dir))
|
||||
return Promise.all([
|
||||
(async () => {
|
||||
const outputStream = await handler.createOutputStream(
|
||||
fileName
|
||||
)
|
||||
$defer.onFailure(() => outputStream.destroy())
|
||||
|
||||
// 'readable-stream/pipeline' not call the callback when an error throws
|
||||
// from the readable stream
|
||||
stream.pipe(outputStream)
|
||||
return fromEvent(stream, 'end').catch(error => {
|
||||
if (error.message !== 'aborted') {
|
||||
throw error
|
||||
}
|
||||
})
|
||||
})(),
|
||||
handler.outputFile(metaDataFileName, metadata),
|
||||
])
|
||||
}),
|
||||
dir: poolMetadataDir,
|
||||
retention: retentionPoolMetadata,
|
||||
}
|
||||
})
|
||||
))
|
||||
)
|
||||
}
|
||||
|
||||
if (files.length === 0) {
|
||||
if (
|
||||
(retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
|
||||
(!job.xoMetadata && retentionPoolMetadata === 0) ||
|
||||
(isEmptyPools && retentionXoMetadata === 0)
|
||||
) {
|
||||
throw new Error('no retentions corresponding to the metadata modes found')
|
||||
}
|
||||
|
||||
cancelToken.throwIfRequested()
|
||||
|
||||
const timestampReg = /^\d{8}T\d{6}Z$/
|
||||
return asyncMap(
|
||||
// TODO: emit a warning task if a remote is broken
|
||||
asyncMap(remoteIds, id => app.getRemoteHandler(id)::ignoreErrors()),
|
||||
async handler => {
|
||||
if (handler === undefined) {
|
||||
return
|
||||
}
|
||||
const app = this._app
|
||||
|
||||
await Promise.all(
|
||||
files.map(async ({ executeBackup, dir, retention }) => {
|
||||
await executeBackup(handler)
|
||||
|
||||
// deleting old backups
|
||||
await handler.list(dir).then(list => {
|
||||
list.sort()
|
||||
list = list
|
||||
.filter(timestampDir => timestampReg.test(timestampDir))
|
||||
.slice(0, -retention)
|
||||
return Promise.all(
|
||||
list.map(timestampDir =>
|
||||
handler.rmtree(`${dir}/${timestampDir}`)
|
||||
)
|
||||
)
|
||||
const handlers = {}
|
||||
await Promise.all(
|
||||
remoteIds.map(id =>
|
||||
app.getRemoteHandler(id).then(
|
||||
handler => {
|
||||
handlers[id] = handler
|
||||
},
|
||||
error => {
|
||||
logger.warning(`unable to get the handler for the remote (${id})`, {
|
||||
event: 'task.warning',
|
||||
taskId: runJobId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
if (Object.keys(handlers).length === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const promises = []
|
||||
if (job.xoMetadata && retentionXoMetadata !== 0) {
|
||||
promises.push(
|
||||
this._backupXo({
|
||||
handlers,
|
||||
job,
|
||||
logger,
|
||||
retention: retentionXoMetadata,
|
||||
runJobId,
|
||||
schedule,
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
if (!isEmptyPools && retentionPoolMetadata !== 0) {
|
||||
poolIds.forEach(id => {
|
||||
let xapi
|
||||
try {
|
||||
xapi = this._app.getXapi(id)
|
||||
} catch (error) {
|
||||
logger.warning(
|
||||
`unable to get the xapi associated to the pool (${id})`,
|
||||
{
|
||||
event: 'task.warning',
|
||||
taskId: runJobId,
|
||||
data: {
|
||||
error,
|
||||
},
|
||||
}
|
||||
)
|
||||
}
|
||||
if (xapi !== undefined) {
|
||||
promises.push(
|
||||
this._backupPool(id, {
|
||||
cancelToken,
|
||||
handlers,
|
||||
job,
|
||||
logger,
|
||||
retention: retentionPoolMetadata,
|
||||
runJobId,
|
||||
schedule,
|
||||
xapi,
|
||||
})
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return Promise.all(promises)
|
||||
}
|
||||
|
||||
async createMetadataBackupJob(
|
||||
|
||||
@@ -168,7 +168,7 @@ export default class {
|
||||
}
|
||||
|
||||
@synchronized()
|
||||
async _updateRemote(id, { benchmarks, url, ...props }) {
|
||||
async _updateRemote(id, { url, ...props }) {
|
||||
const remote = await this._getRemote(id)
|
||||
|
||||
// url is handled separately to take care of obfuscated values
|
||||
@@ -176,13 +176,6 @@ export default class {
|
||||
remote.url = format(sensitiveValues.merge(parse(url), parse(remote.url)))
|
||||
}
|
||||
|
||||
if (
|
||||
benchmarks !== undefined ||
|
||||
(benchmarks = remote.benchmarks) !== undefined
|
||||
) {
|
||||
remote.benchmarks = JSON.stringify(benchmarks)
|
||||
}
|
||||
|
||||
patch(remote, props)
|
||||
|
||||
return (await this._remotes.update(remote)).properties
|
||||
|
||||
@@ -4,6 +4,7 @@ import { ignoreErrors } from 'promise-toolbox'
|
||||
import { hash, needsRehash, verify } from 'hashy'
|
||||
import { invalidCredentials, noSuchObject } from 'xo-common/api-errors'
|
||||
|
||||
import * as XenStore from '../_XenStore'
|
||||
import { Groups } from '../models/group'
|
||||
import { Users } from '../models/user'
|
||||
import { forEach, isEmpty, lightSet, mapToArray } from '../utils'
|
||||
@@ -68,8 +69,12 @@ export default class {
|
||||
)
|
||||
|
||||
if (!(await usersDb.exists())) {
|
||||
const email = 'admin@admin.net'
|
||||
const password = 'admin'
|
||||
const {
|
||||
email = 'admin@admin.net',
|
||||
password = 'admin',
|
||||
} = await XenStore.read('vm-data/admin-account')
|
||||
.then(JSON.parse)
|
||||
.catch(() => ({}))
|
||||
|
||||
await this.createUser({ email, password, permission: 'admin' })
|
||||
log.info(`Default user created: ${email} with password ${password}`)
|
||||
|
||||
packages/xo-server/src/xo-mixins/subscriptions.js (new file, +75)
@@ -0,0 +1,75 @@
|
||||
import BloomFilter from 'mnemonist/bloom-filter'
|
||||
import forOwn from 'lodash/forOwn'
|
||||
import iteratee from 'lodash/iteratee'
|
||||
|
||||
const BLOOM_FILTER_CAPACITY = 50
|
||||
|
||||
export default class Subscriptions {
|
||||
constructor(app) {
|
||||
this._app = app
|
||||
|
||||
this._permCacheByUser = { __proto__: null }
|
||||
this._objCacheBySession = { __proto__: null }
|
||||
this._predicatesBySession = { __proto__: null }
|
||||
}
|
||||
|
||||
async subscribe(sessionId, userId, filter) {
|
||||
const predicatesBySession = this._predicatesBySession
|
||||
const predicates =
|
||||
predicatesBySession[sessionId] ??
|
||||
(predicatesBySession[sessionId] = { __proto__: null })
|
||||
|
||||
const subscriptionId = Math.random()
|
||||
.toString(36)
|
||||
.slice(2)
|
||||
const predicate = iteratee(filter)
|
||||
predicates[subscriptionId] = predicate
|
||||
|
||||
const objCacheBySession = this._objCacheBySession
|
||||
const objCache =
|
||||
objCacheBySession[sessionId] ??
|
||||
(objCacheBySession[sessionId] = { __proto__: null })
|
||||
|
||||
const objects = this.getObjects()
|
||||
const ids = Object.keys(objects)
|
||||
await Promise.all(
|
||||
ids.map(async id => {
|
||||
if (!(await this.hasPermissions(userId, [[id, 'view']]))) {
|
||||
// user cannot see this object
|
||||
return
|
||||
}
|
||||
|
||||
const cache =
|
||||
objCache[id] ??
|
||||
(objCache[id] = new BloomFilter(BLOOM_FILTER_CAPACITY))
|
||||
cache.add(subscriptionId)
|
||||
})
|
||||
)
|
||||
|
||||
return subscriptionId
|
||||
}
|
||||
|
||||
unsubscribe(sessionId, userId, subscriptionId) {
|
||||
const predicates = this._predicatesBySession[sessionId]
|
||||
if (predicates === undefined || !(subscriptionId in predicates)) {
|
||||
return
|
||||
}
|
||||
delete predicates[subscriptionId]
|
||||
|
||||
const objCache = this._objCacheBySession[sessionId]
|
||||
forOwn(objCache, (cache, id) => {
|
||||
if (!cache.test(subscriptionId)) {
|
||||
// not handled by this subscription
|
||||
return
|
||||
}
|
||||
|
||||
const object = this.getObject(id)
|
||||
cache = objCache[id] = new BloomFilter(BLOOM_FILTER_CAPACITY)
|
||||
forOwn(predicates, (predicate, subscriptionId) => {
|
||||
if (predicate(object)) {
|
||||
cache.add(subscriptionId)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
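A hypothetical usage sketch for this new mixin (not part of the diff). It assumes the class ends up composed with an app object that also provides getObjects(), getObject() and hasPermissions(), since those are referenced through `this` above but are not defined in this file:

// inside an async function
const subscriptions = new Subscriptions(app)

// `filter` is passed to lodash's iteratee(), so an object shorthand works
const subscriptionId = await subscriptions.subscribe(session.id, user.id, {
  type: 'VM',
  power_state: 'Running',
})

// …later, when the client is no longer interested
subscriptions.unsubscribe(session.id, user.id, subscriptionId)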
||||
@@ -4,6 +4,7 @@ import { fibonacci } from 'iterable-backoff'
|
||||
import { noSuchObject } from 'xo-common/api-errors'
|
||||
import { pDelay, ignoreErrors } from 'promise-toolbox'
|
||||
|
||||
import * as XenStore from '../_XenStore'
|
||||
import Xapi from '../xapi'
|
||||
import xapiObjectToXo from '../xapi-object-to-xo'
|
||||
import XapiStats from '../xapi-stats'
|
||||
@@ -64,8 +65,19 @@ export default class {
|
||||
servers => serversDb.update(servers)
|
||||
)
|
||||
|
||||
// Connects to existing servers.
|
||||
const servers = await serversDb.get()
|
||||
|
||||
// Add servers in XenStore
|
||||
if (servers.length === 0) {
|
||||
const xenStoreServers = await XenStore.read('vm-data/xen-servers')
|
||||
.then(JSON.parse)
|
||||
.catch(() => [])
|
||||
for (const server of xenStoreServers) {
|
||||
servers.push(await this.registerXenServer(server))
|
||||
}
|
||||
}
|
||||
|
||||
// Connects to existing servers.
|
||||
for (const server of servers) {
|
||||
if (server.enabled) {
|
||||
this.connectXenServer(server.id).catch(error => {
|
||||
|
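For illustration only — the exact server fields accepted by registerXenServer are not visible in this hunk — the 'vm-data/xen-servers' key is expected to hold a JSON array of server descriptions, something along these lines:

// hypothetical content of the XenStore key read above
const xenStoreServers = [
  { label: 'pool-1', host: '192.0.2.10', username: 'root', password: 'secret' },
]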
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-vmdk-to-vhd",
|
||||
"version": "0.1.6",
|
||||
"version": "0.1.7",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "JS lib streaming a vmdk file to a vhd",
|
||||
"keywords": [
|
||||
@@ -25,11 +25,11 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"child-process-promise": "^2.0.3",
|
||||
"core-js": "3.0.0",
|
||||
"core-js": "^3.0.0",
|
||||
"pipette": "^0.9.3",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"tmp": "^0.0.33",
|
||||
"vhd-lib": "^0.6.0"
|
||||
"tmp": "^0.1.0",
|
||||
"vhd-lib": "^0.6.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -50,6 +50,7 @@
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepare": "yarn run build"
|
||||
"prepare": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,10 +12,10 @@ const GRAIN_ADDRESS_OFFSET = 56
|
||||
*/
|
||||
export default async function readVmdkGrainTable(fileAccessor) {
|
||||
const getLongLong = (buffer, offset, name) => {
|
||||
if (buffer.length < offset + 8) {
|
||||
if (buffer.byteLength < offset + 8) {
|
||||
throw new Error(
|
||||
`buffer ${name} is too short, expecting ${offset + 8} minimum, got ${
|
||||
buffer.length
|
||||
buffer.byteLength
|
||||
}`
|
||||
)
|
||||
}
|
||||
@@ -61,11 +61,12 @@ export default async function readVmdkGrainTable(fileAccessor) {
|
||||
const grainTablePhysicalSize = numGTEsPerGT * 4
|
||||
const grainDirectoryEntries = Math.ceil(grainCount / numGTEsPerGT)
|
||||
const grainDirectoryPhysicalSize = grainDirectoryEntries * 4
|
||||
const grainDirBuffer = await fileAccessor(
|
||||
grainDirPosBytes,
|
||||
grainDirPosBytes + grainDirectoryPhysicalSize
|
||||
const grainDir = new Uint32Array(
|
||||
await fileAccessor(
|
||||
grainDirPosBytes,
|
||||
grainDirPosBytes + grainDirectoryPhysicalSize
|
||||
)
|
||||
)
|
||||
const grainDir = new Uint32Array(grainDirBuffer)
|
||||
const cachedGrainTables = []
|
||||
for (let i = 0; i < grainDirectoryEntries; i++) {
|
||||
const grainTableAddr = grainDir[i] * SECTOR_SIZE
|
||||
|
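The switch from buffer.length to buffer.byteLength, together with passing the result of fileAccessor() straight to new Uint32Array(...), suggests that fileAccessor resolves to an ArrayBuffer slice of the VMDK. A hypothetical accessor matching that contract, reading from an in-memory Buffer:

// (start, end) => Promise<ArrayBuffer> — hence byteLength rather than length
const fileAccessorFromBuffer = buf => async (start, end) =>
  buf.buffer.slice(buf.byteOffset + start, buf.byteOffset + end)

// const tables = await readVmdkGrainTable(fileAccessorFromBuffer(vmdkBuffer))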
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"private": true,
|
||||
"name": "xo-web",
|
||||
"version": "5.37.0",
|
||||
"version": "5.40.1",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Web interface client for Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -58,7 +58,7 @@
|
||||
"chartist-plugin-legend": "^0.6.1",
|
||||
"chartist-plugin-tooltip": "0.0.11",
|
||||
"classnames": "^2.2.3",
|
||||
"complex-matcher": "^0.5.0",
|
||||
"complex-matcher": "^0.6.0",
|
||||
"cookies-js": "^1.2.2",
|
||||
"copy-to-clipboard": "^3.0.8",
|
||||
"d3": "^5.0.0",
|
||||
@@ -95,12 +95,12 @@
|
||||
"moment": "^2.20.1",
|
||||
"moment-timezone": "^0.5.14",
|
||||
"notifyjs": "^3.0.0",
|
||||
"otplib": "^10.0.1",
|
||||
"otplib": "^11.0.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"prop-types": "^15.6.0",
|
||||
"qrcode": "^1.3.2",
|
||||
"random-password": "^0.1.2",
|
||||
"reaclette": "^0.7.0",
|
||||
"reaclette": "^0.8.0",
|
||||
"react": "^15.4.1",
|
||||
"react-addons-shallow-compare": "^15.6.2",
|
||||
"react-addons-test-utils": "^15.6.2",
|
||||
@@ -128,7 +128,7 @@
|
||||
"redux-thunk": "^2.0.1",
|
||||
"reselect": "^2.5.4",
|
||||
"rimraf": "^2.6.2",
|
||||
"semver": "^5.4.1",
|
||||
"semver": "^6.0.0",
|
||||
"styled-components": "^3.1.5",
|
||||
"uglify-es": "^3.3.4",
|
||||
"uncontrollable-input": "^0.1.1",
|
||||
@@ -142,7 +142,7 @@
|
||||
"xo-common": "^0.2.0",
|
||||
"xo-lib": "^0.8.0",
|
||||
"xo-remote-parser": "^0.5.0",
|
||||
"xo-vmdk-to-vhd": "^0.1.6"
|
||||
"xo-vmdk-to-vhd": "^0.1.7"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "NODE_ENV=production gulp build",
|
||||
|
||||
@@ -21,6 +21,37 @@ const showAvailableTemplateVars = () =>
|
||||
</ul>
|
||||
)
|
||||
|
||||
const showNetworkConfigInfo = () =>
|
||||
alert(
|
||||
_('newVmNetworkConfigLabel'),
|
||||
<div>
|
||||
<p>
|
||||
{_('newVmNetworkConfigInfo', {
|
||||
noCloudDatasourceLink: (
|
||||
<a
|
||||
href='https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html#datasource-nocloud'
|
||||
target='_blank'
|
||||
>
|
||||
{_('newVmNoCloudDatasource')}
|
||||
</a>
|
||||
),
|
||||
})}
|
||||
</p>
|
||||
<p>
|
||||
{_('newVmNetworkConfigDocLink', {
|
||||
networkConfigDocLink: (
|
||||
<a
|
||||
href='https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v1.html'
|
||||
target='_blank'
|
||||
>
|
||||
{_('newVmNetworkConfigDoc')}
|
||||
</a>
|
||||
),
|
||||
})}
|
||||
</p>
|
||||
</div>
|
||||
)
|
||||
|
||||
export const AvailableTemplateVars = () => (
|
||||
<Tooltip content={_('availableTemplateVarsInfo')}>
|
||||
<a
|
||||
@@ -33,5 +64,28 @@ export const AvailableTemplateVars = () => (
|
||||
</Tooltip>
|
||||
)
|
||||
|
||||
export const NetworkConfigInfo = () => (
|
||||
<Tooltip content={_('newVmNetworkConfigTooltip')}>
|
||||
<a
|
||||
className='text-info'
|
||||
style={{ cursor: 'pointer' }}
|
||||
onClick={showNetworkConfigInfo}
|
||||
>
|
||||
<Icon icon='info' />
|
||||
</a>
|
||||
</Tooltip>
|
||||
)
|
||||
|
||||
export const DEFAULT_CLOUD_CONFIG_TEMPLATE =
|
||||
'#cloud-config\n#hostname: {name}%\n#ssh_authorized_keys:\n# - ssh-rsa <myKey>\n#packages:\n# - htop\n'
|
||||
|
||||
// SOURCE: https://cloudinit.readthedocs.io/en/latest/topics/network-config-format-v1.html
|
||||
export const DEFAULT_NETWORK_CONFIG_TEMPLATE = `#network:
|
||||
# version: 1
|
||||
# config:
|
||||
# - type: physical
|
||||
# name: eth0
|
||||
# subnets:
|
||||
# - type: dhcp`
|
||||
|
||||
export const CAN_CLOUD_INIT = +process.env.XOA_PLAN > 3
|
||||
|
||||
@@ -150,6 +150,17 @@ class Editable extends Component {
|
||||
|
||||
render() {
|
||||
const { state, props } = this
|
||||
const { error, saving } = state
|
||||
|
||||
const ErrorTooltip = props =>
|
||||
props.error != null && (
|
||||
<span>
|
||||
{' '}
|
||||
<Tooltip content={error}>
|
||||
<Icon icon='error' />
|
||||
</Tooltip>
|
||||
</span>
|
||||
)
|
||||
|
||||
if (!state.editing) {
|
||||
const { onUndo, previous } = state
|
||||
@@ -184,12 +195,11 @@ class Editable extends Component {
|
||||
) : (
|
||||
success
|
||||
))}
|
||||
<ErrorTooltip error={error} />
|
||||
</span>
|
||||
)
|
||||
}
|
||||
|
||||
const { error, saving } = state
|
||||
|
||||
return (
|
||||
<span>
|
||||
{this._renderEdition()}
|
||||
@@ -199,14 +209,7 @@ class Editable extends Component {
|
||||
<Icon icon='loading' />
|
||||
</span>
|
||||
)}
|
||||
{error != null && (
|
||||
<span>
|
||||
{' '}
|
||||
<Tooltip content={error}>
|
||||
<Icon icon='error' />
|
||||
</Tooltip>
|
||||
</span>
|
||||
)}
|
||||
<ErrorTooltip error={error} />
|
||||
</span>
|
||||
)
|
||||
}
|
||||
@@ -340,7 +343,9 @@ class SimpleSelect_ extends Editable {
|
||||
return this.state.value === undefined ? this.props.value : this.state.value
|
||||
}
|
||||
|
||||
_onChange = value => this.setState({ value }, this._save)
|
||||
_onChange = value => {
|
||||
this.setState({ value }, this._save)
|
||||
}
|
||||
|
||||
_renderDisplay() {
|
||||
const { children, optionRenderer, value } = this.props
|
||||
@@ -450,7 +455,9 @@ export class XoSelect extends Editable {
|
||||
)
|
||||
}
|
||||
|
||||
_onChange = object => this.setState({ value: object }, object && this._save)
|
||||
_onChange = object => {
|
||||
this.setState({ value: object }, object && this._save)
|
||||
}
|
||||
|
||||
_renderEdition() {
|
||||
const { saving, xoType, ...props } = this.props
|
||||
|
||||
@@ -3411,7 +3411,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
'Esta versión no está creada para recibir soporte ni actualizaciones. Úsala con precaución para tareas críticas.',
|
||||
'Esta versión no está creada para recibir soporte ni actualizaciones. Úsala con precaución.',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: 'Conectar PIF',
|
||||
|
||||
@@ -3016,7 +3016,8 @@ export default {
|
||||
"Le SR par défaut n'est pas connecté à l'hôte",
|
||||
|
||||
// Original text: "For each VDI, select an SR:"
|
||||
chooseSrForEachVdisModalSelectSr: 'Pour chaque VDI, sélectionner un SR :',
|
||||
chooseSrForEachVdisModalSelectSr:
|
||||
'Pour chaque VDI, sélectionner un SR (optionnel)',
|
||||
|
||||
// Original text: "Select main SR…"
|
||||
chooseSrForEachVdisModalMainSr: 'Sélectionner le SR principal…',
|
||||
@@ -3496,7 +3497,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
"Cette version n'est fournie avec aucun support ni aucune mise à jour. Soyez prudent en cas d'utilisation pour des tâches importantes.",
|
||||
"Cette version n'est fournie avec aucun support ni aucune mise à jour. Utilisez-la avec précaution.",
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: 'Connecter la PIF',
|
||||
|
||||
@@ -3248,7 +3248,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
'This Verzió is not bundled with any support nor upDates. Use it with caution for critical tasks.',
|
||||
'This Verzió is not bundled with any support nor upDates. Use it with caution.',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: 'Csatlakozás PIF',
|
||||
|
||||
@@ -2958,7 +2958,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
'This version is not bundled with any support nor updates. Use it with caution for critical tasks.',
|
||||
'This version is not bundled with any support nor updates. Use it with caution.',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: 'Connect PIF',
|
||||
|
||||
@@ -2944,7 +2944,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
'Esta versão não está vinculada a qualquer tipo de suporte nem atualizações. Use-a com cuidado em se tratando de tarefas críticas.',
|
||||
'Esta versão não está vinculada a qualquer tipo de suporte nem atualizações. Use-a com cuidado.',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: 'Conectar PIF',
|
||||
|
||||
@@ -4294,7 +4294,7 @@ export default {
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3:
|
||||
'Bu sürüm herhangi bir destek veya güncellemeyle birlikte verilmez. Kritik görevler için dikkatli kullanın.',
|
||||
'Bu sürüm herhangi bir destek veya güncellemeyle birlikte verilmez. Dikkatli kullanın.',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: "PIF'e bağlan",
|
||||
|
||||
@@ -2236,7 +2236,7 @@ export default {
|
||||
disclaimerText2: '如果你是一个公司,建议使用我们的设备结合专业的支持',
|
||||
|
||||
// Original text: "This version is not bundled with any support nor updates. Use it with caution for critical tasks."
|
||||
disclaimerText3: '这个版本没有绑定任何支持或更新,在紧急任务下,请谨慎使用',
|
||||
disclaimerText3: '这个版本没有绑定任何支持或更新,请谨慎使用',
|
||||
|
||||
// Original text: "Connect PIF"
|
||||
connectPif: '连接物理网卡',
|
||||
|
||||
@@ -46,6 +46,7 @@ const messages = {
|
||||
metadata: 'Metadata',
|
||||
chooseBackup: 'Choose a backup',
|
||||
clickToShowError: 'Click to show error',
|
||||
backupJobs: 'Backup jobs',
|
||||
|
||||
// ----- Modals -----
|
||||
alertOk: 'OK',
|
||||
@@ -430,6 +431,8 @@ const messages = {
|
||||
'This will migrate this backup to a backup NG. This operation is not reversible. Do you want to continue?',
|
||||
runBackupNgJobConfirm: 'Are you sure you want to run {name} ({id})?',
|
||||
cancelJobConfirm: 'Are you sure you want to cancel {name} ({id})?',
|
||||
scheduleDstWarning:
|
||||
'If your country participates in DST, it is advised that you avoid scheduling jobs at the time of change. e.g. 2AM to 3AM for US.',
|
||||
|
||||
// ------ New backup -----
|
||||
newBackupAdvancedSettings: 'Advanced settings',
|
||||
@@ -547,6 +550,7 @@ const messages = {
|
||||
remoteSmbPlaceHolderPassword: 'Password',
|
||||
remoteSmbPlaceHolderDomain: 'Domain',
|
||||
remoteSmbPlaceHolderAddressShare: '<address>\\\\<share> *',
|
||||
remoteSmbPlaceHolderOptions: 'Custom mount options',
|
||||
remotePlaceHolderPassword: 'password(fill to edit)',
|
||||
|
||||
// ------ New Storage -----
|
||||
@@ -1047,12 +1051,12 @@ const messages = {
|
||||
importVdi: 'Import VDI content',
|
||||
importVdiNoFile: 'No file selected',
|
||||
selectVdiMessage: 'Drop VHD file here',
|
||||
srsNotOnSameHost:
|
||||
'The SRs must either be shared or on the same host for the VM to be able to start.',
|
||||
useQuotaWarning:
|
||||
'Creating this disk will use the disk space quota from the resource set {resourceSet} ({spaceLeft} left)',
|
||||
notEnoughSpaceInResourceSet:
|
||||
'Not enough space in resource set {resourceSet} ({spaceLeft} left)',
|
||||
warningVdiSr:
|
||||
"The VDIs' SRs must either be shared or on the same host for the VM to be able to start.",
|
||||
|
||||
// ----- VM network tab -----
|
||||
vifCreateDeviceButton: 'New device',
|
||||
@@ -1281,6 +1285,9 @@ const messages = {
|
||||
spaceLeftTooltip: '{used}% used ({free} left)',
|
||||
|
||||
// ----- New VM -----
|
||||
createVmModalTitle: 'Create VM',
|
||||
createVmModalWarningMessage:
|
||||
"You're about to use a large amount of resources available on the resource set. Are you sure you want to continue?",
|
||||
newVmCreateNewVmOn: 'Create a new VM on {select}',
|
||||
newVmCreateNewVmNoPermission: 'You have no permission to create a VM',
|
||||
newVmInfoPanel: 'Infos',
|
||||
@@ -1314,6 +1321,7 @@ const messages = {
|
||||
newVmSshKey: 'SSH key',
|
||||
noConfigDrive: 'No config drive',
|
||||
newVmCustomConfig: 'Custom config',
|
||||
premiumOnly: 'Only available in Premium',
|
||||
availableTemplateVarsInfo:
|
||||
'Click here to see the available template variables',
|
||||
availableTemplateVarsTitle: 'Available template variables',
|
||||
@@ -1343,6 +1351,15 @@ const messages = {
|
||||
newVmHideAdvanced: 'Hide advanced settings',
|
||||
newVmShare: 'Share this VM',
|
||||
newVmSrsNotOnSameHost: 'The SRs must either be on the same host or shared',
|
||||
newVmNetworkConfigLabel: 'Network config',
|
||||
newVmNetworkConfigInfo:
|
||||
'Network configuration is only compatible with the {noCloudDatasourceLink}.',
|
||||
newVmNetworkConfigDocLink: 'See {networkConfigDocLink}.',
|
||||
newVmNetworkConfigTooltip:
|
||||
'Click here to get more information about network config',
|
||||
newVmUserConfigLabel: 'User config',
|
||||
newVmNoCloudDatasource: 'NoCloud datasource',
|
||||
newVmNetworkConfigDoc: 'Network config documentation',
|
||||
|
||||
// ----- Self -----
|
||||
resourceSets: 'Resource sets',
|
||||
@@ -1605,7 +1622,7 @@ const messages = {
|
||||
migrateVmNoTargetHostMessage: 'A target host is required to migrate a VM',
|
||||
migrateVmNoDefaultSrError: 'No default SR',
|
||||
migrateVmNotConnectedDefaultSrError: 'Default SR not connected to host',
|
||||
chooseSrForEachVdisModalSelectSr: 'For each VDI, select an SR:',
|
||||
chooseSrForEachVdisModalSelectSr: 'For each VDI, select an SR (optional)',
|
||||
chooseSrForEachVdisModalMainSr: 'Select main SR…',
|
||||
chooseSrForEachVdisModalVdiLabel: 'VDI',
|
||||
chooseSrForEachVdisModalSrLabel: 'SR*',
|
||||
@@ -1802,7 +1819,6 @@ const messages = {
|
||||
// ----- Updates View -----
|
||||
updateTitle: 'Updates',
|
||||
registration: 'Registration',
|
||||
trial: 'Trial',
|
||||
settings: 'Settings',
|
||||
proxySettings: 'Proxy settings',
|
||||
proxySettingsHostPlaceHolder: 'Host (myproxy.example.org)',
|
||||
@@ -1853,7 +1869,7 @@ const messages = {
|
||||
disclaimerText2:
|
||||
"If you are a company, it's better to use it with our appliance + pro support included:",
|
||||
disclaimerText3:
|
||||
'This version is not bundled with any support nor updates. Use it with caution for critical tasks.',
|
||||
'This version is not bundled with any support nor updates. Use it with caution.',
|
||||
notRegisteredDisclaimerInfo:
|
||||
'You are not registered. Your XOA may not be up to date.',
|
||||
notRegisteredDisclaimerCreateAccount: 'Click here to create an account.',
|
||||
|
||||
@@ -409,8 +409,7 @@ const xoItemToRender = {
|
||||
<span>
|
||||
<strong>
|
||||
<Icon icon='resource-set' /> {resourceSet.name}
|
||||
</strong>{' '}
|
||||
({resourceSet.id})
|
||||
</strong>
|
||||
</span>
|
||||
),
|
||||
sshKey: key => (
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import classNames from 'classnames'
|
||||
import Icon from 'icon'
|
||||
import PropTypes from 'prop-types'
|
||||
import React from 'react'
|
||||
import { createSchedule } from '@xen-orchestra/cron'
|
||||
@@ -457,6 +458,9 @@ export default class Scheduler extends Component {
|
||||
|
||||
return (
|
||||
<div className='card-block'>
|
||||
<em>
|
||||
<Icon icon='info' /> {_('scheduleDstWarning')}
|
||||
</em>
|
||||
<Row>
|
||||
<Col largeSize={6}>
|
||||
<TimePicker
|
||||
|
||||
@@ -213,6 +213,8 @@ export const getStatus = state => state.status
|
||||
|
||||
export const getUser = state => state.user
|
||||
|
||||
export const getXoaState = state => state.xoaUpdaterState
|
||||
|
||||
export const getCheckPermissions = invoke(() => {
|
||||
const getPredicate = create(
|
||||
state => state.permissions,
|
||||
@@ -273,13 +275,13 @@ const _getPermissionsPredicate = invoke(() => {
|
||||
}
|
||||
)
|
||||
|
||||
return state => {
|
||||
return (state, props, useResourceSet) => {
|
||||
const user = getUser(state)
|
||||
if (!user) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (user.permission === 'admin') {
|
||||
if (user.permission === 'admin' || useResourceSet) {
|
||||
return // No predicate means no filtering.
|
||||
}
|
||||
|
||||
|
||||