Compare commits
82 Commits
xo-web-v5. ... xo-server-

| Author | SHA1 | Date |
|---|---|---|
|  | 4d1eaaaade |  |
|  | bdad6c0f6d |  |
|  | ff1ca5d933 |  |
|  | 2cf4c494a4 |  |
|  | 95ac0a861a |  |
|  | 746c301f39 |  |
|  | 6455b12b58 |  |
|  | 485b8fe993 |  |
|  | d7527f280c |  |
|  | d57fa4375d |  |
|  | d9e42c6625 |  |
|  | 28293d3fce |  |
|  | d505401446 |  |
|  | fafc24aeae |  |
|  | f78ef0d208 |  |
|  | 8384cc3652 |  |
|  | 60aa18a229 |  |
|  | 3d64b42a89 |  |
|  | b301997d4b |  |
|  | ab34743250 |  |
|  | bc14a1d167 |  |
|  | 2886ec116f |  |
|  | c2beb2a5fa |  |
|  | d6ac10f527 |  |
|  | 9dcd8a707a |  |
|  | e1e97ef158 |  |
|  | 5d6b37f81a |  |
|  | e1da08ba38 |  |
|  | 1dfb50fefd |  |
|  | 5c06ebc9c8 |  |
|  | 52a9270fb0 |  |
|  | 82247d7422 |  |
|  | b34688043f |  |
|  | ce4bcbd19d |  |
|  | cde9a02c32 |  |
|  | fe1da4ea12 |  |
|  | a73306817b |  |
|  | 54e683d3d4 |  |
|  | f49910ca82 |  |
|  | 4052f7f736 |  |
|  | b47e097983 |  |
|  | e44dbfb2a4 |  |
|  | 7d69dd9400 |  |
|  | e6aae8fcfa |  |
|  | da800b3391 |  |
|  | 3a574bcecc |  |
|  | 1bb0e234e7 |  |
|  | b7e14ebf2a |  |
|  | 2af1207702 |  |
|  | ecfed30e6e |  |
|  | d06c3e3dd8 |  |
|  | 16b3fbeb16 |  |
|  | 0938804947 |  |
|  | 851bcf9816 |  |
|  | 9f6fc785bc |  |
|  | 56636bf5d4 |  |
|  | 3899a65167 |  |
|  | 628e53c1c3 |  |
|  | 9fa424dd8d |  |
|  | 3e6f2eecfa |  |
|  | cc655c8ba8 |  |
|  | 78aa0474ee |  |
|  | 9caefa2f49 |  |
|  | 478726fa3b |  |
|  | f64917ec52 |  |
|  | 2bc25f91c4 |  |
|  | 623d7ffe2f |  |
|  | 07510b5099 |  |
|  | 9f21f9a7bc |  |
|  | 93da70709e |  |
|  | 00436e744a |  |
|  | 1e642fc512 |  |
|  | 6baef2450c |  |
|  | 600f34f85a |  |
|  | 6c0c6bc5c4 |  |
|  | fcd62ed3cd |  |
|  | 785f2e3a6d |  |
|  | c2925f7c1e |  |
|  | 60814d8b58 |  |
|  | 2dec448f2c |  |
|  | b71f4f6800 |  |
|  | 558083a916 |  |
11  .eslintrc.js
@@ -16,6 +16,16 @@ module.exports = {
     $PropertyType: true,
     $Shape: true,
   },
+
+  overrides: [
+    {
+      files: ['packages/*cli*/**/*.js', '*-cli.js'],
+      rules: {
+        'no-console': 'off',
+      },
+    },
+  ],
+
   parser: 'babel-eslint',
   parserOptions: {
     ecmaFeatures: {
@@ -23,6 +33,7 @@ module.exports = {
     },
   },
   rules: {
+    'no-console': ['error', { allow: ['warn', 'error'] }],
     'no-var': 'error',
     'node/no-extraneous-import': 'error',
     'node/no-extraneous-require': 'error',
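Combined, the two hunks above leave the lint config looking roughly like this (a condensed sketch that omits the keys the diff does not touch): console calls are errors in general, but the rule is lifted for CLI packages, which legitimately print to stdout.

module.exports = {
  rules: {
    // base rule: console calls are errors, except warn/error
    'no-console': ['error', { allow: ['warn', 'error'] }],
  },
  overrides: [
    {
      // CLI entry points may print freely
      files: ['packages/*cli*/**/*.js', '*-cli.js'],
      rules: {
        'no-console': 'off',
      },
    },
  ],
}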
@@ -46,6 +46,12 @@ const getConfig = (key, ...args) => {
     : config
 }

+// some plugins must be used in a specific order
+const pluginsOrder = [
+  '@babel/plugin-proposal-decorators',
+  '@babel/plugin-proposal-class-properties',
+]
+
 module.exports = function(pkg, plugins, presets) {
   plugins === undefined && (plugins = {})
   presets === undefined && (presets = {})
@@ -61,7 +67,13 @@ module.exports = function(pkg, plugins, presets) {
   return {
     comments: !__PROD__,
     ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-    plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
+    plugins: Object.keys(plugins)
+      .map(plugin => [plugin, plugins[plugin]])
+      .sort(([a], [b]) => {
+        const oA = pluginsOrder.indexOf(a)
+        const oB = pluginsOrder.indexOf(b)
+        return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
+      }),
     presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
   }
 }
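A standalone sketch of what the comparator above guarantees: plugins listed in pluginsOrder keep that fixed relative order (decorators before class properties, which Babel requires for the two proposals to compose), while every other pair falls back to name order.

const pluginsOrder = [
  '@babel/plugin-proposal-decorators',
  '@babel/plugin-proposal-class-properties',
]

const sortPluginNames = names =>
  names.sort((a, b) => {
    const oA = pluginsOrder.indexOf(a)
    const oB = pluginsOrder.indexOf(b)
    return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
  })

// class-properties sorts before decorators alphabetically,
// but the explicit order wins:
console.log(
  sortPluginNames([
    '@babel/plugin-proposal-class-properties',
    '@babel/plugin-proposal-decorators',
  ])
)
// => ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']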
@@ -16,6 +16,6 @@
   },
   "dependencies": {
     "golike-defer": "^0.4.1",
-    "xen-api": "^0.24.5"
+    "xen-api": "^0.25.0"
   }
 }
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/fs",
-  "version": "0.7.1",
+  "version": "0.8.0",
   "license": "AGPL-3.0",
   "description": "The File System for Xen Orchestra backups.",
   "keywords": [],
@@ -24,12 +24,12 @@
     "@marsaud/smb2": "^0.13.0",
     "@sindresorhus/df": "^2.1.0",
     "@xen-orchestra/async-map": "^0.0.0",
-    "decorator-synchronized": "^0.3.0",
+    "decorator-synchronized": "^0.5.0",
     "execa": "^1.0.0",
     "fs-extra": "^7.0.0",
     "get-stream": "^4.0.0",
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "readable-stream": "^3.0.6",
     "through2": "^3.0.0",
     "tmp": "^0.0.33",
@@ -45,7 +45,7 @@
     "async-iterator-to-stream": "^1.1.0",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
-    "dotenv": "^6.1.0",
+    "dotenv": "^7.0.0",
     "index-modules": "^0.3.0",
     "rimraf": "^2.6.2"
   },
@@ -25,6 +25,10 @@ type RemoteInfo = { used?: number, size?: number }
 type File = FileDescriptor | string

 const checksumFile = file => file + '.checksum'
+const computeRate = (hrtime: number[], size: number) => {
+  const seconds = hrtime[0] + hrtime[1] / 1e9
+  return size / seconds
+}

 const DEFAULT_TIMEOUT = 6e5 // 10 min

@@ -362,18 +366,27 @@ export default class RemoteHandlerAbstract {
   }

   async test(): Promise<Object> {
+    const SIZE = 1024 * 1024 * 10
     const testFileName = normalizePath(`${Date.now()}.test`)
-    const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
+    const data = await fromCallback(cb => randomBytes(SIZE, cb))
     let step = 'write'
     try {
+      const writeStart = process.hrtime()
       await this._outputFile(testFileName, data, { flags: 'wx' })
+      const writeDuration = process.hrtime(writeStart)
+
       step = 'read'
+      const readStart = process.hrtime()
       const read = await this._readFile(testFileName, { flags: 'r' })
+      const readDuration = process.hrtime(readStart)
+
       if (!data.equals(read)) {
         throw new Error('output and input did not match')
       }
       return {
         success: true,
+        writeRate: computeRate(writeDuration, SIZE),
+        readRate: computeRate(readDuration, SIZE),
       }
     } catch (error) {
       return {
@@ -290,9 +290,11 @@ handlers.forEach(url => {

   describe('#test()', () => {
     it('tests the remote appears to be working', async () => {
-      expect(await handler.test()).toEqual({
-        success: true,
-      })
+      const answer = await handler.test()
+
+      expect(answer.success).toBe(true)
+      expect(typeof answer.writeRate).toBe('number')
+      expect(typeof answer.readRate).toBe('number')
     })
   })
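For reference, this is how the new writeRate/readRate numbers come about, extracted into a self-contained sketch: process.hrtime() returns [seconds, nanoseconds], so dividing the byte count by the elapsed time yields bytes per second (the 10 MiB size matches the SIZE constant in the hunk above).

const computeRate = (hrtime, size) => {
  const seconds = hrtime[0] + hrtime[1] / 1e9
  return size / seconds
}

const SIZE = 1024 * 1024 * 10
const start = process.hrtime()
// ... write (or read) SIZE bytes on the remote here ...
const duration = process.hrtime(start)
console.log(computeRate(duration, SIZE), 'B/s')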
@@ -31,7 +31,7 @@
   },
   "dependencies": {
     "lodash": "^4.17.4",
-    "promise-toolbox": "^0.11.0"
+    "promise-toolbox": "^0.12.1"
   },
   "devDependencies": {
     "@babel/cli": "^7.0.0",
47  CHANGELOG.md
@@ -1,5 +1,52 @@
 # ChangeLog

+## **5.33.0** (2019-03-29)
+
+### Enhancements
+
+- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
+- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
+- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
+- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
+- Enable compression for HTTP requests (and initial objects fetch)
+- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
+- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
+- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
+- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
+- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
+- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
+- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
+- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
+- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
+- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
+- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
+- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
+- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
+- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
+- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
+- Improve connection to XCP-ng/XenServer hosts:
+  - never disconnect by itself even in case of errors
+  - never stop watching events
+
+### Bug fixes
+
+- [New network] PIF was wrongly required which prevented from creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
+- [Google authentication] Migrate to new endpoint
+- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
+- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
+- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
+- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
+- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
+- [Remotes] Fixes `spawn mount EMFILE` error during backup
+- Properly redirect to sign in page instead of being stuck in a refresh loop
+- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
+- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
+- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
+  - Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
+  - Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
+  - Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
+  - XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)
+
 ## **5.32.2** (2019-02-28)

 ### Bug fixes
@@ -2,27 +2,9 @@

 ### Enhancements

-- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
-- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
-- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
-- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
-- Enable compression for HTTP requests (and initial objects fetch)
-- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
-- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
-- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
-- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))

 ### Bug fixes

-- [New network] PIF was wrongly required which prevented from creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
-- [Google authentication] Migrate to new endpoint
-- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
-- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
-- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
-- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))

 ### Released packages

-- xo-server-auth-google v0.2.1
-- xo-server v5.37.0
-- xo-web v5.37.0
+- xo-server v5.39.0
+- xo-web v5.39.0
BIN  docs/assets/cr-seed-1.png (new file; binary file not shown; after: 12 KiB)
BIN  docs/assets/cr-seed-2.png (new file; binary file not shown; after: 14 KiB)
BIN  docs/assets/cr-seed-3.png (new file; binary file not shown; after: 15 KiB)
BIN  docs/assets/cr-seed-4.png (new file; binary file not shown; after: 17 KiB)
@@ -12,7 +12,9 @@ Another good way to check if there is activity is the XOA VM stats view (on the

 ### VDI chain protection

-This means your previous VM disks and snapshots should be "merged" (*coalesced* in the XenServer world) before we can take a new snapshot. This mechanism is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. Otherwise, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product dealing with this.
+Backup jobs regularly delete snapshots. When a snapshot is deleted, either manually or via a backup job, it triggers the need for XenServer to coalesce the VDI chain, that is, to merge the remaining VDIs and base copies in the chain. This generally means we cannot take too many new snapshots on said VM until XenServer has finished running a coalesce job on the VDI chain.
+
+This mechanism and its scheduling are handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. If we don't, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product that takes this into account and offers protection.

 Without this detection, you could have 2 potential issues:

@@ -21,9 +23,9 @@ Without this detection, you could have 2 potential issues:

 The first issue is a chain that contains more than 30 elements (a fixed XenServer limit); the other means the SR is full because the "coalesce" process couldn't keep up the pace and the storage filled up.

-In the end, this message is a **protection mechanism against damaging your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
+In the end, this message is a **protection mechanism preventing damage to your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the next time the backup job should complete.

-Just remember this: **coalesce will happen every time a snapshot is removed**.
+Just remember this: **a coalesce should happen every time a snapshot is removed**.

 > You can read more on this in our dedicated blog post on [XenServer coalesce detection](https://xen-orchestra.com/blog/xenserver-coalesce-detection-in-xen-orchestra/).

@@ -37,7 +39,9 @@ Coalesce jobs can also fail to run if the SR does not have enough free space. Ch

 You can check if a coalesce job is currently active by running `ps axf | grep vhd` on the XenServer host and looking for a VHD process in the results (one of the resulting processes will be the grep command you just ran; ignore that one).

 If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process; if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.

+As a last resort, migrating the VM (more specifically, its disks) to a new storage repository will also force a coalesce and solve this issue. That means migrating a VM to another host (with its own storage) and back will force the VDI chain for that VM to be coalesced, and get rid of the `VDI Chain Protection` message.
+
 ### Parse Error
@@ -43,11 +43,19 @@ To protect the replication, we removed the possibility to boot your copied VM di

 ### Job creation

-Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, note its identifiers, the main `backupJobId` and the ID of one of the schedules for the job, `backupScheduleId`.
+Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, copy the job's `backupJobId` by hovering to the left of the shortened ID and clicking the copy-to-clipboard button:
+
+
+
+Copy it somewhere temporarily. Now we also need to copy the ID of the job schedule, `backupScheduleId`. Do this by hovering over the schedule name in the same panel as before, and clicking the copy-to-clipboard button. Keep it with the `backupJobId` you copied previously, as we will need them both later:
+
+

 ### Seed creation

-Manually create a snapshot on the VM to back up, and note its UUID as `snapshotUuid` from the snapshot panel for the VM.
+Manually create a snapshot on the VM being backed up, then copy this snapshot's UUID, `snapshotUuid`, from the snapshot panel of the VM:
+
+

 > DO NOT ever delete or alter this snapshot; feel free to rename it to make that clear.

@@ -55,7 +63,9 @@ Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUu

 Export this snapshot to a file, then import it on the target SR.

-Note the UUID of this newly created VM as `targetVmUuid`.
+We need to copy the UUID of this newly created VM as well, `targetVmUuid`:
+
+

 > DO NOT start this VM or it will break the Continuous Replication job! You can rename this VM to more easily remember this.
@@ -16,13 +16,13 @@
   "eslint-plugin-react": "^7.6.1",
   "eslint-plugin-standard": "^4.0.0",
   "exec-promise": "^0.7.0",
-  "flow-bin": "^0.94.0",
+  "flow-bin": "^0.95.1",
   "globby": "^9.0.0",
   "husky": "^1.2.1",
   "jest": "^24.1.0",
   "lodash": "^4.17.4",
   "prettier": "^1.10.2",
-  "promise-toolbox": "^0.11.0",
+  "promise-toolbox": "^0.12.1",
   "sorted-object": "^2.0.1"
 },
 "engines": {
@@ -27,12 +27,12 @@
   "node": ">=6"
 },
 "dependencies": {
-  "@xen-orchestra/fs": "^0.7.1",
+  "@xen-orchestra/fs": "^0.8.0",
   "cli-progress": "^2.0.0",
   "exec-promise": "^0.7.0",
   "getopts": "^2.2.3",
   "struct-fu": "^1.2.0",
-  "vhd-lib": "^0.5.1"
+  "vhd-lib": "^0.6.0"
 },
 "devDependencies": {
   "@babel/cli": "^7.0.0",
@@ -42,7 +42,7 @@
   "cross-env": "^5.1.3",
   "execa": "^1.0.0",
   "index-modules": "^0.3.0",
-  "promise-toolbox": "^0.11.0",
+  "promise-toolbox": "^0.12.1",
   "rimraf": "^2.6.1",
   "tmp": "^0.0.33"
 },
@@ -1,38 +1,40 @@
 /* eslint-env jest */

+import asyncIteratorToStream from 'async-iterator-to-stream'
 import execa from 'execa'
 import fs from 'fs-extra'
 import getStream from 'get-stream'
 import rimraf from 'rimraf'
 import tmp from 'tmp'
-import { fromEvent, pFromCallback } from 'promise-toolbox'
 import { getHandler } from '@xen-orchestra/fs'
+import { pFromCallback } from 'promise-toolbox'
 import { pipeline } from 'readable-stream'
 import { randomBytes } from 'crypto'

 import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'

 import { SECTOR_SIZE } from './src/_constants'

-const initialDir = process.cwd()
+let tempDir = null

 jest.setTimeout(60000)

 beforeEach(async () => {
-  const dir = await pFromCallback(cb => tmp.dir(cb))
-  process.chdir(dir)
+  tempDir = await pFromCallback(cb => tmp.dir(cb))
 })

 afterEach(async () => {
-  const tmpDir = process.cwd()
-  process.chdir(initialDir)
-  await pFromCallback(cb => rimraf(tmpDir, cb))
+  await pFromCallback(cb => rimraf(tempDir, cb))
 })

-async function createRandomFile(name, sizeMb) {
-  await execa('bash', [
-    '-c',
-    `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
-  ])
+async function createRandomFile(name, sizeMB) {
+  const createRandomStream = asyncIteratorToStream(function*(size) {
+    while (size-- > 0) {
+      yield Buffer.from([Math.floor(Math.random() * 256)])
+    }
+  })
+  const input = createRandomStream(sizeMB * 1024 * 1024)
+  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
 }

 async function checkFile(vhdName) {
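The rewritten createRandomFile illustrates the async-iterator-to-stream pattern used throughout these tests: passing a generator function yields a factory for readable streams. A minimal standalone sketch:

import asyncIteratorToStream from 'async-iterator-to-stream'

// a stream factory producing n pseudo-random bytes, one per yield
const createRandomStream = asyncIteratorToStream(function*(n) {
  while (n-- > 0) {
    yield Buffer.from([Math.floor(Math.random() * 256)])
  }
})

createRandomStream(16).pipe(process.stdout) // emits 16 random bytes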
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {

 test('blocks can be moved', async () => {
   const initalSize = 4
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  await createRandomFile(rawFileName, initalSize)
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, vhdFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd._freeFirstBlockSpace(8000000)
-  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  const recoveredFileName = `${tempDir}/recovered`
+  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('the BAT MSB is not used for sign', async () => {
   const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const vhd = new Vhd(handler, 'empty.vhd')
+  const emptyFileName = `${tempDir}/empty.vhd`
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
+  const handler = getHandler({ url: 'file://' })
+  const vhd = new Vhd(handler, emptyFileName)
   await vhd.readHeaderAndFooter()
   await vhd.readBlockAllocationTable()
   // we want the bit 31 to be on, to prove it's not been used for sign
   const hugeWritePositionSectors = Math.pow(2, 31) + 200
   await vhd.writeData(hugeWritePositionSectors, randomBuffer)
-  await checkFile('empty.vhd')
+  await checkFile(emptyFileName)
   // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
   const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
   await vhd._freeFirstBlockSpace(hugePositionBytes)
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
   // we recover the data manually for speed reasons.
   // fs.write() with offset is way faster than qemu-img when there is a 1.5To
   // hole before the block of data
-  const recoveredFile = await fs.open('recovered', 'w')
+  const recoveredFileName = `${tempDir}/recovered`
+  const recoveredFile = await fs.open(recoveredFileName, 'w')
   try {
-    const vhd2 = new Vhd(handler, 'empty.vhd')
+    const vhd2 = new Vhd(handler, emptyFileName)
     await vhd2.readHeaderAndFooter()
     await vhd2.readBlockAllocationTable()
     for (let i = 0; i < vhd.header.maxTableEntries; i++) {
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
   fs.close(recoveredFile)
 }
 const recovered = await getStream.buffer(
-  await fs.createReadStream('recovered', {
+  await fs.createReadStream(recoveredFileName, {
     start: hugePositionBytes,
     end: hugePositionBytes + randomBuffer.length - 1,
   })
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {

 test('writeData on empty file', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.writeData(0, randomData)
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  const recoveredFileName = `${tempDir}/recovered`
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('writeData in 2 non-overlaping operations', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   const splitPointSectors = 2
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
     splitPointSectors,
     randomData.slice(splitPointSectors * 512)
   )
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('writeData in 2 overlaping operations', async () => {
   const mbOfRandom = 3
-  await createRandomFile('randomfile', mbOfRandom)
-  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
-  const randomData = await fs.readFile('randomfile')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'empty.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
+  const randomData = await fs.readFile(rawFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, emptyFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   const endFirstWrite = 3
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
     startSecondWrite,
     randomData.slice(startSecondWrite * 512)
   )
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(randomData)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
 })

 test('BAT can be extended and blocks moved', async () => {
   const initalSize = 4
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler.getSize('randomfile')
-  const newVhd = new Vhd(handler, 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const recoveredFileName = `${tempDir}/recovered`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  await createRandomFile(rawFileName, initalSize)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler.getSize(rawFileName)
+  const newVhd = new Vhd(handler, vhdFileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.ensureBatSize(2000)
-  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('coalesce works with empty parent files', async () => {
   const mbOfRandom = 2
-  await createRandomFile('randomfile', mbOfRandom)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
+  const rawFileName = `${tempDir}/randomfile`
+  const emptyFileName = `${tempDir}/empty.vhd`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(rawFileName, mbOfRandom)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
   await execa('qemu-img', [
     'create',
     '-fvpc',
-    'empty.vhd',
+    emptyFileName,
     mbOfRandom + 1 + 'M',
   ])
-  await checkFile('randomfile.vhd')
-  await checkFile('empty.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const originalSize = await handler._getSize('randomfile')
-  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
-  await checkFile('randomfile.vhd')
-  await checkFile('empty.vhd')
-  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
-  await recoverRawContent('empty.vhd', 'recovered', originalSize)
-  expect(await fs.readFile('recovered')).toEqual(
-    await fs.readFile('randomfile')
+  await checkFile(vhdFileName)
+  await checkFile(emptyFileName)
+  const handler = getHandler({ url: 'file://' })
+  const originalSize = await handler._getSize(rawFileName)
+  await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
+  await checkFile(vhdFileName)
+  await checkFile(emptyFileName)
+  await vhdMerge(handler, emptyFileName, handler, vhdFileName)
+  await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(rawFileName)
   )
 })

 test('coalesce works in normal cases', async () => {
   const mbOfRandom = 5
-  await createRandomFile('randomfile', mbOfRandom)
-  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
+  const randomFileName = `${tempDir}/randomfile`
+  const random2FileName = `${tempDir}/randomfile2`
+  const smallRandomFileName = `${tempDir}/small_randomfile`
+  const parentFileName = `${tempDir}/parent.vhd`
+  const child1FileName = `${tempDir}/child1.vhd`
+  const child2FileName = `${tempDir}/child2.vhd`
+  const recoveredFileName = `${tempDir}/recovered`
+  await createRandomFile(randomFileName, mbOfRandom)
+  await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
   await execa('qemu-img', [
     'create',
     '-fvpc',
-    'parent.vhd',
+    parentFileName,
     mbOfRandom + 1 + 'M',
   ])
-  await convertFromRawToVhd('randomfile', 'child1.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
-  const vhd = new Vhd(handler, 'child2.vhd')
+  await convertFromRawToVhd(randomFileName, child1FileName)
+  const handler = getHandler({ url: 'file://' })
+  await execa('vhd-util', [
+    'snapshot',
+    '-n',
+    child2FileName,
+    '-p',
+    child1FileName,
+  ])
+  const vhd = new Vhd(handler, child2FileName)
   await vhd.readHeaderAndFooter()
   await vhd.readBlockAllocationTable()
   vhd.footer.creatorApplication = 'xoa'
   await vhd.writeFooter()

-  const originalSize = await handler._getSize('randomfile')
-  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
-  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
-  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
-  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
-  const smallRandom = await fs.readFile('small_randomfile')
-  const newVhd = new Vhd(handler, 'child2.vhd')
+  const originalSize = await handler._getSize(randomFileName)
+  await chainVhd(handler, parentFileName, handler, child1FileName, true)
+  await execa('vhd-util', ['check', '-t', '-n', child1FileName])
+  await chainVhd(handler, child1FileName, handler, child2FileName, true)
+  await execa('vhd-util', ['check', '-t', '-n', child2FileName])
+  const smallRandom = await fs.readFile(smallRandomFileName)
+  const newVhd = new Vhd(handler, child2FileName)
   await newVhd.readHeaderAndFooter()
   await newVhd.readBlockAllocationTable()
   await newVhd.writeData(5, smallRandom)
-  await checkFile('child2.vhd')
-  await checkFile('child1.vhd')
-  await checkFile('parent.vhd')
-  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
-  await checkFile('parent.vhd')
-  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
-  await checkFile('child2.vhd')
-  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
-  await checkFile('parent.vhd')
-  await recoverRawContent(
-    'parent.vhd',
-    'recovered_from_coalescing',
-    originalSize
-  )
-  await execa('cp', ['randomfile', 'randomfile2'])
-  const fd = await fs.open('randomfile2', 'r+')
+  await checkFile(child2FileName)
+  await checkFile(child1FileName)
+  await checkFile(parentFileName)
+  await vhdMerge(handler, parentFileName, handler, child1FileName)
+  await checkFile(parentFileName)
+  await chainVhd(handler, parentFileName, handler, child2FileName, true)
+  await checkFile(child2FileName)
+  await vhdMerge(handler, parentFileName, handler, child2FileName)
+  await checkFile(parentFileName)
+  await recoverRawContent(parentFileName, recoveredFileName, originalSize)
+  await execa('cp', [randomFileName, random2FileName])
+  const fd = await fs.open(random2FileName, 'r+')
   try {
     await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
   } finally {
     await fs.close(fd)
   }
-  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
-    await fs.readFile('randomfile2')
+  expect(await fs.readFile(recoveredFileName)).toEqual(
+    await fs.readFile(random2FileName)
   )
 })

-test('createSyntheticStream passes vhd-util check', async () => {
+test.only('createSyntheticStream passes vhd-util check', async () => {
   const initalSize = 4
-  const expectedVhdSize = 4197888
-  await createRandomFile('randomfile', initalSize)
-  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
-  const handler = getHandler({ url: 'file://' + process.cwd() })
-  const stream = await createSyntheticStream(handler, 'randomfile.vhd')
-  expect(stream.length).toEqual(expectedVhdSize)
-  await fromEvent(
-    stream.pipe(await fs.createWriteStream('recovered.vhd')),
-    'finish'
+  const rawFileName = `${tempDir}/randomfile`
+  const vhdFileName = `${tempDir}/randomfile.vhd`
+  const recoveredVhdFileName = `${tempDir}/recovered.vhd`
+  await createRandomFile(rawFileName, initalSize)
+  await convertFromRawToVhd(rawFileName, vhdFileName)
+  await checkFile(vhdFileName)
+  const handler = getHandler({ url: 'file://' })
+  const stream = await createSyntheticStream(handler, vhdFileName)
+  const expectedVhdSize = (await fs.stat(vhdFileName)).size
+  expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
+  await pFromCallback(cb =>
+    pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
   )
-  await checkFile('recovered.vhd')
-  const stats = await fs.stat('recovered.vhd')
+  await checkFile(recoveredVhdFileName)
+  const stats = await fs.stat(recoveredVhdFileName)
   expect(stats.size).toEqual(expectedVhdSize)
-  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
+  await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
 })
@@ -1,6 +1,6 @@
 {
   "name": "vhd-lib",
-  "version": "0.5.1",
+  "version": "0.6.0",
   "license": "AGPL-3.0",
   "description": "Primitives for VHD file handling",
   "keywords": [],
@@ -22,11 +22,11 @@
   },
   "dependencies": {
     "async-iterator-to-stream": "^1.0.2",
-    "core-js": "3.0.0-beta.3",
+    "core-js": "3.0.0",
     "from2": "^2.3.0",
     "fs-extra": "^7.0.0",
     "limit-concurrency-decorator": "^0.4.0",
-    "promise-toolbox": "^0.11.0",
+    "promise-toolbox": "^0.12.1",
     "struct-fu": "^1.2.0",
     "uuid": "^3.0.1"
   },
@@ -35,13 +35,14 @@
   "@babel/core": "^7.0.0",
   "@babel/preset-env": "^7.0.0",
   "@babel/preset-flow": "^7.0.0",
-  "@xen-orchestra/fs": "^0.7.1",
+  "@xen-orchestra/fs": "^0.8.0",
   "babel-plugin-lodash": "^3.3.2",
   "cross-env": "^5.1.3",
   "execa": "^1.0.0",
   "fs-promise": "^2.0.0",
   "get-stream": "^4.0.0",
   "index-modules": "^0.3.0",
   "readable-stream": "^3.0.6",
   "rimraf": "^2.6.2",
   "tmp": "^0.0.33"
 },
20  packages/vhd-lib/src/_checkFooter.js (new file)
@@ -0,0 +1,20 @@
import assert from 'assert'

import {
  DISK_TYPE_DIFFERENCING,
  DISK_TYPE_DYNAMIC,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
} from './_constants'

export default footer => {
  assert.strictEqual(footer.cookie, FOOTER_COOKIE)
  assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
  assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
  assert(footer.originalSize <= footer.currentSize)
  assert(
    footer.diskType === DISK_TYPE_DIFFERENCING ||
      footer.diskType === DISK_TYPE_DYNAMIC
  )
}
14  packages/vhd-lib/src/_checkHeader.js (new file)
@@ -0,0 +1,14 @@
import assert from 'assert'

import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'

export default (header, footer) => {
  assert.strictEqual(header.cookie, HEADER_COOKIE)
  assert.strictEqual(header.dataOffset, undefined)
  assert.strictEqual(header.headerVersion, HEADER_VERSION)
  assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))

  if (footer !== undefined) {
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
  }
}
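A hypothetical caller combining the two validators above, mirroring how vhd.js uses them further down (fuFooter and fuHeader are the struct parsers already used elsewhere in this package; parseAndCheck itself is illustrative, not part of the diff):

import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import { FOOTER_SIZE, HEADER_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'

// parse and validate the leading footer + header of a VHD buffer;
// assert throws on the first violated invariant
function parseAndCheck(buf) {
  const footer = fuFooter.unpack(buf.slice(0, FOOTER_SIZE))
  checkFooter(footer)
  const header = fuHeader.unpack(
    buf.slice(FOOTER_SIZE, FOOTER_SIZE + HEADER_SIZE)
  )
  checkHeader(header, footer)
  return { footer, header }
}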
47  packages/vhd-lib/src/_getFirstAndLastBlocks.js (new file)
@@ -0,0 +1,47 @@
import assert from 'assert'

import { BLOCK_UNUSED } from './_constants'

// get the identifiers and first sectors of the first and last block
// in the file
export default bat => {
  const n = bat.length
  assert.notStrictEqual(n, 0)
  assert.strictEqual(n % 4, 0)

  let i = 0
  let j = 0
  let first, firstSector, last, lastSector

  // get first allocated block for initialization
  while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
    i += 1
    j += 4

    if (j === n) {
      const error = new Error('no allocated block found')
      error.noBlock = true
      throw error
    }
  }
  lastSector = firstSector
  first = last = i

  while (j < n) {
    const sector = bat.readUInt32BE(j)
    if (sector !== BLOCK_UNUSED) {
      if (sector < firstSector) {
        first = i
        firstSector = sector
      } else if (sector > lastSector) {
        last = i
        lastSector = sector
      }
    }

    i += 1
    j += 4
  }

  return { first, firstSector, last, lastSector }
}
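A small illustration of the helper above, assuming BLOCK_UNUSED is the all-ones sentinel (0xffffffff) that the VHD block allocation table uses for unallocated entries:

const BLOCK_UNUSED = 0xffffffff // assumption: the value from _constants

// a 3-entry BAT: big-endian uint32 sector numbers, entry 1 unallocated
const bat = Buffer.alloc(12)
bat.writeUInt32BE(200, 0) // block 0 starts at sector 200
bat.writeUInt32BE(BLOCK_UNUSED, 4) // block 1 unallocated
bat.writeUInt32BE(100, 8) // block 2 starts at sector 100

// getFirstAndLastBlocks(bat)
// => { first: 2, firstSector: 100, last: 0, lastSector: 200 }
// i.e. block 2 sits first in the file and block 0 sits last,
// regardless of their positions in the table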
50  packages/vhd-lib/src/_readChunk.js (new file)
@@ -0,0 +1,50 @@
export default async function readChunk(stream, n) {
  if (n === 0) {
    return Buffer.alloc(0)
  }
  return new Promise((resolve, reject) => {
    const chunks = []
    let i = 0

    function clean() {
      stream.removeListener('readable', onReadable)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }

    function resolve2() {
      clean()
      resolve(Buffer.concat(chunks, i))
    }

    function onEnd() {
      resolve2()
      clean()
    }

    function onError(error) {
      reject(error)
      clean()
    }

    function onReadable() {
      const chunk = stream.read(n - i)
      if (chunk === null) {
        return // wait for more data
      }
      i += chunk.length
      chunks.push(chunk)
      if (i >= n) {
        resolve2()
      }
    }

    stream.on('end', onEnd)
    stream.on('error', onError)
    stream.on('readable', onReadable)

    if (stream.readable) {
      onReadable()
    }
  })
}
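A hypothetical use of readChunk: pull an exact-size prefix off a stream before handing the rest on, which is what createVhdStreamWithLength below does with the footer and header.

import { createReadStream } from 'fs'
// assumes the helper lives next to the caller, as in this package
import readChunk from './_readChunk'

async function readVhdFooter(path) {
  const stream = createReadStream(path)
  // 512 bytes is the VHD footer size; the result may be shorter only
  // if the stream ends early
  return readChunk(stream, 512)
}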
93  packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js (new file)
@@ -0,0 +1,93 @@
/* eslint-env jest */

import asyncIteratorToStream from 'async-iterator-to-stream'
import execa from 'execa'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import getStream from 'get-stream'
import tmp from 'tmp'
import { createReadStream, createWriteStream } from 'fs'
import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

import { createVhdStreamWithLength } from '.'
import { FOOTER_SIZE } from './_constants'

let tempDir = null

beforeEach(async () => {
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
})

async function convertFromRawToVhd(rawName, vhdName) {
  await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}

async function createRandomFile(name, size) {
  const createRandomStream = asyncIteratorToStream(function*(size) {
    while (size-- > 0) {
      yield Buffer.from([Math.floor(Math.random() * 256)])
    }
  })
  const input = await createRandomStream(size)
  await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
}

test('createVhdStreamWithLength can extract length', async () => {
  const initialSize = 4 * 1024
  const rawFileName = `${tempDir}/randomfile`
  const vhdName = `${tempDir}/randomfile.vhd`
  const outputVhdName = `${tempDir}/output.vhd`
  await createRandomFile(rawFileName, initialSize)
  await convertFromRawToVhd(rawFileName, vhdName)
  const vhdSize = fs.statSync(vhdName).size
  const result = await createVhdStreamWithLength(
    await createReadStream(vhdName)
  )
  expect(result.length).toEqual(vhdSize)
  const outputFileStream = await createWriteStream(outputVhdName)
  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
  const outputSize = fs.statSync(outputVhdName).size
  expect(outputSize).toEqual(vhdSize)
})

test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
  const initialSize = 4 * 1024
  const rawFileName = `${tempDir}/randomfile`
  const vhdName = `${tempDir}/randomfile.vhd`
  const outputVhdName = `${tempDir}/output.vhd`
  await createRandomFile(rawFileName, initialSize)
  await convertFromRawToVhd(rawFileName, vhdName)
  const vhdSize = fs.statSync(vhdName).size
  // read file footer
  const footer = await getStream.buffer(
    createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
  )

  // we'll override the footer
  const endOfFile = await createWriteStream(vhdName, {
    flags: 'r+',
    start: vhdSize - FOOTER_SIZE,
  })
  // write a blank over the previous footer
  await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
  // write the footer after the new blank
  await pFromCallback(cb => endOfFile.end(footer, cb))
  const longerSize = fs.statSync(vhdName).size
  // check input file has been lengthened
  expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
  const result = await createVhdStreamWithLength(
    await createReadStream(vhdName)
  )
  expect(result.length).toEqual(vhdSize)
  const outputFileStream = await createWriteStream(outputVhdName)
  await pFromCallback(cb => pipeline(result, outputFileStream, cb))
  const outputSize = fs.statSync(outputVhdName).size
  // check out file has been shortened again
  expect(outputSize).toEqual(vhdSize)
  await execa('qemu-img', ['compare', outputVhdName, vhdName])
})
80  packages/vhd-lib/src/createVhdStreamWithLength.js (new file)
@@ -0,0 +1,80 @@
import assert from 'assert'
import { pipeline, Transform } from 'readable-stream'

import checkFooter from './_checkFooter'
import checkHeader from './_checkHeader'
import noop from './_noop'
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
import readChunk from './_readChunk'
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
import { fuFooter, fuHeader } from './_structs'

class EndCutterStream extends Transform {
  constructor(footerOffset, footerBuffer) {
    super()
    this._footerOffset = footerOffset
    this._footerBuffer = footerBuffer
    this._position = 0
    this._done = false
  }

  _transform(data, encoding, callback) {
    if (!this._done) {
      if (this._position + data.length >= this._footerOffset) {
        this._done = true
        const difference = this._footerOffset - this._position
        data = data.slice(0, difference)
        this.push(data)
        this.push(this._footerBuffer)
      } else {
        this.push(data)
      }
      this._position += data.length
    }
    callback()
  }
}

export default async function createVhdStreamWithLength(stream) {
  const readBuffers = []
  let streamPosition = 0

  async function readStream(length) {
    const chunk = await readChunk(stream, length)
    assert.strictEqual(chunk.length, length)
    streamPosition += chunk.length
    readBuffers.push(chunk)
    return chunk
  }

  const footerBuffer = await readStream(FOOTER_SIZE)
  const footer = fuFooter.unpack(footerBuffer)
  checkFooter(footer)

  const header = fuHeader.unpack(await readStream(HEADER_SIZE))
  checkHeader(header, footer)

  await readStream(header.tableOffset - streamPosition)

  const table = await readStream(header.maxTableEntries * 4)

  readBuffers.reverse()
  for (const buf of readBuffers) {
    stream.unshift(buf)
  }

  const footerOffset =
    getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
    Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
    header.blockSize

  // ignore any data after footerOffset and push footerBuffer
  //
  // this is necessary to ignore any blank space between the last block and the
  // final footer which would invalidate the size we computed
  const newStream = new EndCutterStream(footerOffset, footerBuffer)
  pipeline(stream, newStream, noop)

  newStream.length = footerOffset + FOOTER_SIZE
  return newStream
}
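A worked instance of the footerOffset formula above, under the default VHD geometry (2 MiB blocks, 512-byte sectors) and assuming the last allocated block starts at sector 10000:

const SECTOR_SIZE = 512
const blockSize = 2 * 1024 * 1024 // 2 MiB, the usual VHD default
const lastSector = 10000

// one 512-byte sector is enough for the 4096-bit block bitmap
const bitmapBytes =
  Math.ceil(blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE // 512

const footerOffset = lastSector * SECTOR_SIZE + bitmapBytes + blockSize
// = 5120000 + 512 + 2097152 = 7217664; the advertised stream length
// is then footerOffset + FOOTER_SIZE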
@@ -11,3 +11,6 @@ export {
 } from './createReadableSparseStream'
 export { default as createSyntheticStream } from './createSyntheticStream'
 export { default as mergeVhd } from './merge'
+export {
+  default as createVhdStreamWithLength,
+} from './createVhdStreamWithLength'
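With the export in place, a hypothetical consumer (for instance an upload that must announce a Content-Length) could use it like this:

import { createReadStream } from 'fs'
import { createVhdStreamWithLength } from 'vhd-lib'

async function streamWithKnownSize(path) {
  const stream = await createVhdStreamWithLength(createReadStream(path))
  console.log('exact VHD byte length:', stream.length)
  return stream // safe to pipe to a sink that requires a known length
}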
@@ -1,19 +1,16 @@
 import assert from 'assert'
 import { fromEvent } from 'promise-toolbox'

+import checkFooter from './_checkFooter'
+import checkHeader from './_checkHeader'
 import constantStream from './_constant-stream'
+import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
 import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
 import { set as mapSetBit, test as mapTestBit } from './_bitmap'
 import {
   BLOCK_UNUSED,
-  DISK_TYPE_DIFFERENCING,
-  DISK_TYPE_DYNAMIC,
-  FILE_FORMAT_VERSION,
-  FOOTER_COOKIE,
   FOOTER_SIZE,
-  HEADER_COOKIE,
   HEADER_SIZE,
-  HEADER_VERSION,
   PARENT_LOCATOR_ENTRIES,
   PLATFORM_NONE,
   PLATFORM_W2KU,
@@ -170,21 +167,10 @@ export default class Vhd {
    }

    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
    assert(footer.originalSize <= footer.currentSize)
    assert(
      footer.diskType === DISK_TYPE_DIFFERENCING ||
        footer.diskType === DISK_TYPE_DYNAMIC
    )
    checkFooter(footer)

    const header = (this.header = fuHeader.unpack(bufHeader))
    assert.strictEqual(header.cookie, HEADER_COOKIE)
    assert.strictEqual(header.dataOffset, undefined)
    assert.strictEqual(header.headerVersion, HEADER_VERSION)
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
    checkHeader(header, footer)

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.

@@ -242,49 +228,6 @@ export default class Vhd {
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  //
  _getFirstAndLastBlocks() {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
    let i = 0
    let j = 0
    let first, firstSector, last, lastSector

    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += 4

      if (i === n) {
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
    first = last = i

    while (i < n) {
      const sector = bat.readUInt32BE(j)
      if (sector !== BLOCK_UNUSED) {
        if (sector < firstSector) {
          first = i
          firstSector = sector
        } else if (sector > lastSector) {
          last = i
          lastSector = sector
        }
      }

      i += 1
      j += 4
    }

    return { first, firstSector, last, lastSector }
  }

  // =================================================================
  // Write functions.
  // =================================================================
@@ -311,7 +254,9 @@ export default class Vhd {

  async _freeFirstBlockSpace(spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      const { first, firstSector, lastSector } = getFirstAndLastBlocks(
        this.blockTable
      )
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(

@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

import { createReadableRawStream, createReadableSparseStream } from './'

import { createFooter } from './src/_createFooterHeader'

const initialDir = process.cwd()
let tempDir = null

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
  tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
  await pFromCallback(cb => rimraf(tempDir, cb))
})

test('createFooter() does not crash', () => {

@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
  }
  const fileSize = 1000
  const stream = createReadableRawStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await pFromCallback(cb =>
    pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
  )
  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {

@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
    new Promise((resolve, reject) => {
      const stream = createReadableRawStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
      pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
        err ? reject(err) : resolve()
      )
    })
  ).rejects.toThrow('Received out of order blocks')
})
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
    blocks
  )
  expect(stream.length).toEqual(4197888)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
    `${tempDir}/output.vhd`,
    `${tempDir}/out1.raw`,
  ])
  const out1 = await readFile('out1.raw')
  const out1 = await readFile(`${tempDir}/out1.raw`)
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)

@@ -41,7 +41,7 @@
    "human-format": "^0.10.0",
    "lodash": "^4.17.4",
    "pw": "^0.0.4",
    "xen-api": "^0.24.5"
    "xen-api": "^0.25.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.1.5",

@@ -4,7 +4,7 @@ const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')

const { isOpaqueRef } = require('../')
const isOpaqueRef = require('../dist/_isOpaqueRef').default

exports.createInputStream = path => {
  if (path === undefined || path === '-') {

@@ -1,6 +1,6 @@
{
  "name": "xen-api",
  "version": "0.24.5",
  "version": "0.25.0",
  "license": "ISC",
  "description": "Connector to the Xen API",
  "keywords": [

@@ -37,8 +37,7 @@
    "debug": "^4.0.1",
    "event-to-promise": "^0.8.0",
    "exec-promise": "^0.7.0",
    "http-request-plus": "^0.7.2",
    "iterable-backoff": "^0.1.0",
    "http-request-plus": "^0.8.0",
    "jest-diff": "^24.0.0",
    "json-rpc-protocol": "^0.13.1",
    "kindof": "^2.0.0",

@@ -46,7 +45,7 @@
    "make-error": "^1.3.0",
    "minimist": "^1.2.0",
    "ms": "^2.1.1",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "pw": "0.0.4",
    "xmlrpc": "^1.3.2",
    "xo-collection": "^0.4.1"

@@ -54,7 +53,10 @@
  "devDependencies": {
    "@babel/cli": "^7.0.0",
    "@babel/core": "^7.0.0",
    "@babel/plugin-proposal-class-properties": "^7.3.4",
    "@babel/plugin-proposal-decorators": "^7.0.0",
    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.2.0",
    "@babel/plugin-proposal-optional-chaining": "^7.2.0",
    "@babel/preset-env": "^7.0.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",

packages/xen-api/src/_XapiError.js (new file, 30 lines)
@@ -0,0 +1,30 @@
import { BaseError } from 'make-error'

export default class XapiError extends BaseError {
  static wrap(error) {
    let code, params
    if (Array.isArray(error)) {
      // < XenServer 7.3
      ;[code, ...params] = error
    } else {
      code = error.message
      params = error.data
      if (!Array.isArray(params)) {
        params = []
      }
    }
    return new XapiError(code, params)
  }

  constructor(code, params) {
    super(`${code}(${params.join(', ')})`)

    this.code = code
    this.params = params

    // slots that can be assigned later
    this.call = undefined
    this.url = undefined
    this.task = undefined
  }
}
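A quick sketch of how the two error shapes are normalized: XenServer older than 7.3 reports errors as an array, newer versions as an object. The error codes below are illustrative:

import XapiError from './_XapiError'

// array form (< XenServer 7.3)
const e1 = XapiError.wrap(['SR_BACKEND_FAILURE_44', 'insufficient space'])
// object form
const e2 = XapiError.wrap({ message: 'HANDLE_INVALID', data: ['VM', 'OpaqueRef:…'] })
console.error(e1.code, e1.params) // 'SR_BACKEND_FAILURE_44' [ 'insufficient space' ]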
packages/xen-api/src/_coalesceCalls.js (new file, 15 lines)
@@ -0,0 +1,15 @@
// decorates fn so that multiple concurrent calls are coalesced
export default function coalesceCalls(fn) {
  let promise
  const clean = () => {
    promise = undefined
  }
  return function() {
    if (promise !== undefined) {
      return promise
    }
    promise = fn.apply(this, arguments)
    promise.then(clean, clean)
    return promise
  }
}
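A typical use of this pattern (hypothetical here, the spec below shows the real contract): wrapping a reconnect routine so that many callers awaiting a lost connection trigger a single attempt.

import coalesceCalls from './_coalesceCalls'

const reconnect = coalesceCalls(function() {
  return openSession() // hypothetical async reconnect routine
})
// concurrent callers share the single in-flight attempt
Promise.all([reconnect(), reconnect()])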
packages/xen-api/src/_coalesceCalls.spec.js (new file, 26 lines)
@@ -0,0 +1,26 @@
/* eslint-env jest */

import pDefer from 'promise-toolbox/defer'

import coalesceCalls from './_coalesceCalls'

describe('coalesceCalls', () => {
  it('decorates an async function', async () => {
    const fn = coalesceCalls(promise => promise)

    const defer1 = pDefer()
    const promise1 = fn(defer1.promise)
    const defer2 = pDefer()
    const promise2 = fn(defer2.promise)

    defer1.resolve('foo')
    expect(await promise1).toBe('foo')
    expect(await promise2).toBe('foo')

    const defer3 = pDefer()
    const promise3 = fn(defer3.promise)

    defer3.resolve('bar')
    expect(await promise3).toBe('bar')
  })
})
packages/xen-api/src/_debug.js (new file, 3 lines)
@@ -0,0 +1,3 @@
import debug from 'debug'

export default debug('xen-api')
packages/xen-api/src/_getTaskResult.js (new file, 22 lines)
@@ -0,0 +1,22 @@
import { Cancel } from 'promise-toolbox'

import XapiError from './_XapiError'

export default task => {
  const { status } = task
  if (status === 'cancelled') {
    return Promise.reject(new Cancel('task canceled'))
  }
  if (status === 'failure') {
    const error = XapiError.wrap(task.error_info)
    error.task = task
    return Promise.reject(error)
  }
  if (status === 'success') {
    // the result might be:
    // - empty string
    // - an opaque reference
    // - an XML-RPC value
    return Promise.resolve(task.result)
  }
}
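For illustration, how the three terminal task states map to promise outcomes (the task objects below are stubs, not real XAPI records):

import getTaskResult from './_getTaskResult'

getTaskResult({ status: 'success', result: 'OpaqueRef:…' }) // resolves with the result
getTaskResult({ status: 'cancelled' }) // rejects with a Cancel
getTaskResult({ status: 'failure', error_info: ['HANDLE_INVALID'] }) // rejects with a XapiError
// any other status (e.g. 'pending') returns undefined: the task is still running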
packages/xen-api/src/_isGetAllRecordsMethod.js (new file, 3 lines)
@@ -0,0 +1,3 @@
const SUFFIX = '.get_all_records'

export default method => method.endsWith(SUFFIX)
packages/xen-api/src/_isOpaqueRef.js (new file, 3 lines)
@@ -0,0 +1,3 @@
const PREFIX = 'OpaqueRef:'

export default value => typeof value === 'string' && value.startsWith(PREFIX)
packages/xen-api/src/_isReadOnlyCall.js (new file, 4 lines)
@@ -0,0 +1,4 @@
const RE = /^[^.]+\.get_/

export default (method, args) =>
  args.length === 1 && typeof args[0] === 'string' && RE.test(method)
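So, for example, a `get_` method called with a single string argument counts as read-only, while anything else does not:

import isReadOnlyCall from './_isReadOnlyCall'

isReadOnlyCall('VM.get_name_label', ['OpaqueRef:…']) // true
isReadOnlyCall('VM.start', ['OpaqueRef:…']) // false: not a get_ method
isReadOnlyCall('VM.get_by_name_label', ['foo', 'bar']) // false: more than one argument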
packages/xen-api/src/_makeCallSetting.js (new file, 8 lines)
@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
  setting === undefined
    ? () => defaultValue
    : typeof setting === 'function'
    ? setting
    : typeof setting === 'object'
    ? method => setting[method] ?? setting['*'] ?? defaultValue
    : () => setting
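A call setting can therefore be a constant, a function, or a per-method map with a `*` fallback. A hypothetical timeout setting, for illustration:

import makeCallSetting from './_makeCallSetting'

const getTimeout = makeCallSetting({ 'VM.start': 60e3, '*': 10e3 }, 5e3)
getTimeout('VM.start') // 60000, the per-method entry
getTimeout('VM.pause') // 10000, the '*' fallback
const getDefault = makeCallSetting(undefined, 5e3)
getDefault('VM.start') // 5000, the default value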
packages/xen-api/src/_parseUrl.js (new file, 18 lines)
@@ -0,0 +1,18 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/

export default url => {
  const matches = URL_RE.exec(url)
  if (matches === null) {
    throw new Error('invalid URL: ' + url)
  }

  const [, protocol = 'https:', username, password, hostname, port] = matches
  const parsedUrl = { protocol, hostname, port }
  if (username !== undefined) {
    parsedUrl.username = decodeURIComponent(username)
  }
  if (password !== undefined) {
    parsedUrl.password = decodeURIComponent(password)
  }
  return parsedUrl
}
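For example (host name and credentials below are made up):

import parseUrl from './_parseUrl'

parseUrl('root:secret@xs1.example.org')
// → { protocol: 'https:', hostname: 'xs1.example.org', port: undefined,
//     username: 'root', password: 'secret' }
parseUrl('http://xs1.example.org:8080/')
// → { protocol: 'http:', hostname: 'xs1.example.org', port: '8080' }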
File diff suppressed because it is too large
packages/xen-api/src/transports/_UnsupportedTransport.js (new file, 3 lines)
@@ -0,0 +1,3 @@
import makeError from 'make-error'

export default makeError('UnsupportedTransport')
packages/xen-api/src/transports/_prepareXmlRpcParams.js (new file, 25 lines)
@@ -0,0 +1,25 @@
// Prepare values before passing them to the XenAPI:
//
// - cast integers to strings
export default function prepare(param) {
  if (Number.isInteger(param)) {
    return String(param)
  }

  if (typeof param !== 'object' || param === null) {
    return param
  }

  if (Array.isArray(param)) {
    return param.map(prepare)
  }

  const values = {}
  Object.keys(param).forEach(key => {
    const value = param[key]
    if (value !== undefined) {
      values[key] = prepare(value)
    }
  })
  return values
}
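For instance, integers are stringified recursively while other values pass through untouched and `undefined` properties are dropped:

import prepare from './_prepareXmlRpcParams'

prepare({ memory: 4096, name: 'vm1', tags: [1, 'two'], skip: undefined })
// → { memory: '4096', name: 'vm1', tags: ['1', 'two'] }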
@@ -1,3 +0,0 @@
import makeError from 'make-error'

export const UnsupportedTransport = makeError('UnsupportedTransport')
@@ -1,7 +1,7 @@
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'

const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories

@@ -1,7 +1,7 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'

import { UnsupportedTransport } from './_utils'
import UnsupportedTransport from './_UnsupportedTransport'

// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ allowUnauthorized, url }) => {

@@ -1,7 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import { UnsupportedTransport } from './_utils'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'

const logError = error => {
  if (error.res) {

@@ -71,10 +72,7 @@ const parseResult = result => {
  throw new UnsupportedTransport()
}

export default ({
  allowUnauthorized,
  url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
  const client = (protocol === 'https:' ? createSecureClient : createClient)({
    host: hostname,
    path: '/json',

@@ -83,5 +81,6 @@ export default ({
  })
  const call = promisify(client.methodCall, client)

  return (method, args) => call(method, args).then(parseResult, logError)
  return (method, args) =>
    call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

@@ -1,6 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import prepareXmlRpcParams from './_prepareXmlRpcParams'

const logError = error => {
  if (error.res) {
    console.error(

@@ -30,10 +32,7 @@ const parseResult = result => {
  return result.Value
}

export default ({
  allowUnauthorized,
  url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
  const client = (protocol === 'https:' ? createSecureClient : createClient)({
    host: hostname,
    port,

@@ -41,5 +40,6 @@ export default ({
  })
  const call = promisify(client.methodCall, client)

  return (method, args) => call(method, args).then(parseResult, logError)
  return (method, args) =>
    call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

@@ -34,7 +34,7 @@
    "chalk": "^2.2.0",
    "exec-promise": "^0.7.0",
    "fs-promise": "^2.0.3",
    "http-request-plus": "^0.7.2",
    "http-request-plus": "^0.8.0",
    "human-format": "^0.10.0",
    "l33teral": "^3.0.3",
    "lodash": "^4.17.4",

@@ -43,7 +43,7 @@
    "nice-pipe": "0.0.0",
    "pretty-ms": "^4.0.0",
    "progress-stream": "^2.0.0",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "pump": "^3.0.0",
    "pw": "^0.0.4",
    "strip-indent": "^2.0.0",

@@ -1,7 +1,7 @@
import kindOf from 'kindof'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { forEach } from 'lodash'
import { forOwn } from 'lodash'

import isEmpty from './is-empty'
import isObject from './is-object'

@@ -10,6 +10,7 @@ import isObject from './is-object'

const {
  create: createObject,
  keys,
  prototype: { hasOwnProperty },
} = Object

@@ -63,6 +64,16 @@ export class NoSuchItem extends BaseError {

// -------------------------------------------------------------------

const assertValidKey = key => {
  if (!isValidKey(key)) {
    throw new InvalidKey(key)
  }
}

const isValidKey = key => typeof key === 'number' || typeof key === 'string'

// -------------------------------------------------------------------

export default class Collection extends EventEmitter {
  constructor() {
    super()

@@ -71,7 +82,7 @@ export default class Collection extends EventEmitter {
    this._buffering = 0
    this._indexes = createObject(null)
    this._indexedItems = createObject(null)
    this._items = {} // createObject(null)
    this._items = createObject(null)
    this._size = 0
  }

@@ -113,7 +124,7 @@ export default class Collection extends EventEmitter {
  }

  clear() {
    forEach(this._items, (_, key) => this._remove(key))
    keys(this._items).forEach(this._remove, this)
  }

  remove(keyOrObjectWithId) {

@@ -176,8 +187,7 @@ export default class Collection extends EventEmitter {
      return defaultValue
    }

    // Throws a NoSuchItem.
    this._assertHas(key)
    throw new NoSuchItem(key)
  }

  has(key) {

@@ -189,7 +199,7 @@ export default class Collection extends EventEmitter {
  // -----------------------------------------------------------------

  createIndex(name, index) {
    const { _indexes: indexes } = this
    const indexes = this._indexes
    if (hasOwnProperty.call(indexes, name)) {
      throw new DuplicateIndex(name)
    }

@@ -201,7 +211,7 @@ export default class Collection extends EventEmitter {
  }

  deleteIndex(name) {
    const { _indexes: indexes } = this
    const indexes = this._indexes
    if (!hasOwnProperty.call(indexes, name)) {
      throw new NoSuchIndex(name)
    }

@@ -218,7 +228,7 @@ export default class Collection extends EventEmitter {
  // -----------------------------------------------------------------

  *[Symbol.iterator]() {
    const { _items: items } = this
    const items = this._items

    for (const key in items) {
      yield [key, items[key]]

@@ -226,7 +236,7 @@ export default class Collection extends EventEmitter {
  }

  *keys() {
    const { _items: items } = this
    const items = this._items

    for (const key in items) {
      yield key

@@ -234,7 +244,7 @@ export default class Collection extends EventEmitter {
  }

  *values() {
    const { _items: items } = this
    const items = this._items

    for (const key in items) {
      yield items[key]

@@ -255,11 +265,11 @@ export default class Collection extends EventEmitter {
    }
    called = true

    if (--this._buffering) {
    if (--this._buffering !== 0) {
      return
    }

    const { _buffer: buffer } = this
    const buffer = this._buffer

    // Due to deduplication there could be nothing in the buffer.
    if (isEmpty(buffer)) {

@@ -276,7 +286,7 @@ export default class Collection extends EventEmitter {
      data[buffer[key]][key] = this._items[key]
    }

    forEach(data, (items, action) => {
    forOwn(data, (items, action) => {
      if (!isEmpty(items)) {
        this.emit(action, items)
      }

@@ -306,16 +316,6 @@ export default class Collection extends EventEmitter {
    }
  }

  _assertValidKey(key) {
    if (!this._isValidKey(key)) {
      throw new InvalidKey(key)
    }
  }

  _isValidKey(key) {
    return typeof key === 'number' || typeof key === 'string'
  }

  _remove(key) {
    delete this._items[key]
    this._size--

@@ -324,17 +324,17 @@ export default class Collection extends EventEmitter {

  _resolveItem(keyOrObjectWithId, valueIfKey = undefined) {
    if (valueIfKey !== undefined) {
      this._assertValidKey(keyOrObjectWithId)
      assertValidKey(keyOrObjectWithId)

      return [keyOrObjectWithId, valueIfKey]
    }

    if (this._isValidKey(keyOrObjectWithId)) {
    if (isValidKey(keyOrObjectWithId)) {
      return [keyOrObjectWithId]
    }

    const key = this.getKey(keyOrObjectWithId)
    this._assertValidKey(key)
    assertValidKey(key)

    return [key, keyOrObjectWithId]
  }

@@ -347,7 +347,7 @@ export default class Collection extends EventEmitter {
    }

    if (action === ACTION_ADD) {
      this._buffer[key] = this._buffer[key] ? ACTION_UPDATE : ACTION_ADD
      this._buffer[key] = key in this._buffer ? ACTION_UPDATE : ACTION_ADD
    } else if (action === ACTION_REMOVE) {
      if (this._buffer[key] === ACTION_ADD) {
        delete this._buffer[key]

@@ -356,7 +356,7 @@ export default class Collection extends EventEmitter {
      }
    } else {
      // update
      if (!this._buffer[key]) {
      if (!(key in this._buffer)) {
        this._buffer[key] = ACTION_UPDATE
      }
    }

@@ -43,7 +43,7 @@
    "xo-lib": "^0.9.0"
  },
  "devDependencies": {
    "@types/node": "^10.12.2",
    "@types/node": "^11.11.4",
    "@types/through2": "^2.0.31",
    "tslint": "^5.9.1",
    "tslint-config-standard": "^8.0.1",

@@ -1,8 +1,3 @@
declare module 'csv-parser' {
  function csvParser(opts?: Object): any
  export = csvParser
}

declare module 'exec-promise' {
  function execPromise(cb: (args: string[]) => any): void
  export = execPromise

@@ -39,7 +39,7 @@
    "inquirer": "^6.0.0",
    "ldapjs": "^1.0.1",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.11.0"
    "promise-toolbox": "^0.12.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -32,7 +32,7 @@
    "node": ">=6"
  },
  "dependencies": {
    "http-request-plus": "^0.7.2",
    "http-request-plus": "^0.8.0",
    "jsonrpc-websocket-client": "^0.4.1"
  },
  "devDependencies": {

@@ -34,7 +34,7 @@
  "dependencies": {
    "nodemailer": "^5.0.0",
    "nodemailer-markdown": "^1.0.1",
    "promise-toolbox": "^0.11.0"
    "promise-toolbox": "^0.12.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -33,7 +33,7 @@
    "node": ">=6"
  },
  "dependencies": {
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "slack-node": "^0.1.8"
  },
  "devDependencies": {

@@ -1,6 +1,6 @@
{
  "name": "xo-server-usage-report",
  "version": "0.7.1",
  "version": "0.7.2",
  "license": "AGPL-3.0",
  "description": "",
  "keywords": [

@@ -42,7 +42,7 @@
    "html-minifier": "^3.5.8",
    "human-format": "^0.10.0",
    "lodash": "^4.17.4",
    "promise-toolbox": "^0.11.0"
    "promise-toolbox": "^0.12.1"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -494,7 +494,7 @@ async function getHostsMissingPatches({ runningHosts, xo }) {
    map(runningHosts, async host => {
      let hostsPatches = await xo
        .getXapi(host)
        .listMissingPoolPatchesOnHost(host._xapiId)
        .listMissingPatches(host._xapiId)
        .catch(error => {
          console.error(
            '[WARN] error on fetching hosts missing patches:',

@@ -9,6 +9,18 @@ datadir = '/var/lib/xo-server/data'
# Necessary for external authentication providers.
createUserOnFirstSignin = true

# XAPI does not support chunked encoding in HTTP requests, which is necessary
# when the content length is not known, as is the case for many backup-related
# operations in XO.
#
# It's possible to work around this for VHDs because their size can be guessed
# just by looking at the beginning of the stream.
#
# But it is a guess, not a certainty: it depends on how the VHDs are formatted
# by XenServer. It is therefore disabled for the moment but can be enabled
# specifically for a user if necessary.
guessVhdSizeOnImport = false

# Whether API logs should contain the full request/response on
# errors.
#

@@ -21,6 +33,22 @@ verboseApiLogsOnErrors = false
[apiWebSocketOptions]
perMessageDeflate = { threshold = 524288 } # 512kiB

[authentication]
defaultTokenValidity = '30 days'
maxTokenValidity = '0.5 year'

# Defaults to `maxTokenValidity`
#permanentCookieValidity = '30 days'

# Defaults to `undefined`, i.e. as long as the browser is not restarted
#
# https://developer.mozilla.org/fr/docs/Web/HTTP/Headers/Set-Cookie#Session_cookie
#sessionCookieValidity = '10 hours'

[backup]
# Delay for which backups listing on a remote is cached
listingDebounce = '1 min'

[[http.listen]]
port = 80

@@ -1,6 +1,6 @@
{
  "name": "xo-server",
  "version": "5.37.0",
  "version": "5.38.1",
  "license": "AGPL-3.0",
  "description": "Server part of Xen-Orchestra",
  "keywords": [

@@ -35,8 +35,9 @@
    "@iarna/toml": "^2.2.1",
    "@xen-orchestra/async-map": "^0.0.0",
    "@xen-orchestra/cron": "^1.0.3",
    "@xen-orchestra/defined": "^0.0.0",
    "@xen-orchestra/emit-async": "^0.0.0",
    "@xen-orchestra/fs": "^0.7.1",
    "@xen-orchestra/fs": "^0.8.0",
    "@xen-orchestra/log": "^0.1.4",
    "@xen-orchestra/mixin": "^0.0.0",
    "ajv": "^6.1.1",

@@ -70,7 +71,7 @@
    "helmet": "^3.9.0",
    "highland": "^2.11.1",
    "http-proxy": "^1.16.2",
    "http-request-plus": "^0.7.2",
    "http-request-plus": "^0.8.0",
    "http-server-plus": "^0.10.0",
    "human-format": "^0.10.0",
    "is-redirect": "^1.0.0",

@@ -100,7 +101,7 @@
    "passport": "^0.4.0",
    "passport-local": "^1.0.0",
    "pretty-format": "^24.0.0",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "proxy-agent": "^3.0.0",
    "pug": "^2.0.0-rc.4",
    "pump": "^3.0.0",

@@ -119,9 +120,9 @@
    "tmp": "^0.0.33",
    "uuid": "^3.0.1",
    "value-matcher": "^0.2.0",
    "vhd-lib": "^0.5.1",
    "vhd-lib": "^0.6.0",
    "ws": "^6.0.0",
    "xen-api": "^0.24.5",
    "xen-api": "^0.25.0",
    "xml2js": "^0.4.19",
    "xo-acl-resolver": "^0.4.1",
    "xo-collection": "^0.4.1",

packages/xo-server/src/_MultiKeyMap.js (new file, 87 lines)
@@ -0,0 +1,87 @@
class Node {
  constructor(value) {
    this.children = new Map()
    this.value = value
  }
}

function del(node, i, keys) {
  if (i === keys.length) {
    if (node instanceof Node) {
      node.value = undefined
      return node
    }
    return
  }
  if (!(node instanceof Node)) {
    return node
  }
  const key = keys[i]
  const { children } = node
  const child = children.get(key)
  if (child === undefined) {
    return node
  }
  const newChild = del(child, i + 1, keys)
  if (newChild === undefined) {
    if (children.size === 1) {
      return node.value
    }
    children.delete(key)
  } else if (newChild !== child) {
    children.set(key, newChild)
  }
  return node
}

function get(node, i, keys) {
  return i === keys.length
    ? node instanceof Node
      ? node.value
      : node
    : node instanceof Node
    ? get(node.children.get(keys[i]), i + 1, keys)
    : undefined
}

function set(node, i, keys, value) {
  if (i === keys.length) {
    if (node instanceof Node) {
      node.value = value
      return node
    }
    return value
  }
  const key = keys[i]
  if (!(node instanceof Node)) {
    node = new Node(node)
    node.children.set(key, set(undefined, i + 1, keys, value))
  } else {
    const { children } = node
    const child = children.get(key)
    const newChild = set(child, i + 1, keys, value)
    if (newChild !== child) {
      children.set(key, newChild)
    }
  }
  return node
}

export default class MultiKeyMap {
  constructor() {
    // each node is either a value or a Node if it contains children
    this._root = undefined
  }

  delete(keys) {
    this._root = del(this._root, 0, keys)
  }

  get(keys) {
    return get(this._root, 0, keys)
  }

  set(keys, value) {
    this._root = set(this._root, 0, keys, value)
  }
}
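A short usage sketch (the keys are arbitrary): entries are addressed by an ordered list of keys, and the trie-like structure only materializes internal `Node`s where two entries share a prefix.

import MultiKeyMap from './_MultiKeyMap'

const map = new MultiKeyMap()
map.set(['user1', 'host1'], 'a')
map.set(['user1', 'host2'], 'b')
map.get(['user1', 'host1']) // 'a'
map.delete(['user1', 'host1'])
map.get(['user1', 'host1']) // undefined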
packages/xo-server/src/_ensureArray.js (new file, 3 lines)
@@ -0,0 +1,3 @@
// Ensure the value is an array, wrap it if necessary.
export default value =>
  value === undefined ? [] : Array.isArray(value) ? value : [value]
packages/xo-server/src/_ensureArray.spec.js (new file, 21 lines)
@@ -0,0 +1,21 @@
/* eslint-env jest */

import ensureArray from './_ensureArray'

describe('ensureArray()', function() {
  it('wraps the value in an array', function() {
    const value = 'foo'

    expect(ensureArray(value)).toEqual([value])
  })

  it('returns an empty array for undefined', function() {
    expect(ensureArray(undefined)).toEqual([])
  })

  it('returns the object itself if it is already an array', function() {
    const array = ['foo', 'bar', 'baz']

    expect(ensureArray(array)).toBe(array)
  })
})
packages/xo-server/src/_pDebounceWithKey.js (new file, 39 lines)
@@ -0,0 +1,39 @@
import ensureArray from './_ensureArray'
import MultiKeyMap from './_MultiKeyMap'

function removeCacheEntry(cache, keys) {
  cache.delete(keys)
}

function scheduleRemoveCacheEntry(keys, expires) {
  const delay = expires - Date.now()
  if (delay <= 0) {
    removeCacheEntry(this, keys)
  } else {
    setTimeout(removeCacheEntry, delay, this, keys)
  }
}

const defaultKeyFn = () => []

// debounce an async function so that all subsequent calls in a delay receive
// the same result
//
// similar to `p-debounce` with `leading` set to `true` but with key support
export default (fn, delay, keyFn = defaultKeyFn) => {
  const cache = new MultiKeyMap()
  return function() {
    const keys = ensureArray(keyFn.apply(this, arguments))
    let promise = cache.get(keys)
    if (promise === undefined) {
      cache.set(keys, (promise = fn.apply(this, arguments)))
      const remove = scheduleRemoveCacheEntry.bind(
        cache,
        keys,
        Date.now() + delay
      )
      promise.then(remove, remove)
    }
    return promise
  }
}
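Illustrative use (the decorated function and delay below are hypothetical): all calls sharing a key within the delay receive the same pending or settled promise.

import pDebounceWithKey from './_pDebounceWithKey'

const fetchStats = pDebounceWithKey(
  hostId => expensiveQuery(hostId), // hypothetical async function
  60e3, // cache each result for one minute
  hostId => hostId // one cache entry per host
)
// two close calls for the same host share a single expensiveQuery() invocation
fetchStats('host1')
fetchStats('host1')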
packages/xo-server/src/_parseDuration.js (new file, 12 lines)
@@ -0,0 +1,12 @@
import ms from 'ms'

export default value => {
  if (typeof value === 'number') {
    return value
  }
  const duration = ms(value)
  if (duration === undefined) {
    throw new TypeError(`not a valid duration: ${value}`)
  }
  return duration
}
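So both forms used in the sample configuration above are accepted (values below are illustrative):

import parseDuration from './_parseDuration'

parseDuration('30 days') // 2592000000 (milliseconds, via `ms`)
parseDuration(60000) // 60000: numbers pass through unchanged
parseDuration('nope') // throws a TypeError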
@@ -199,59 +199,6 @@ forget.resolve = {

// -------------------------------------------------------------------

// Returns an array of missing new patches in the host
// Returns an empty array if up-to-date
// Throws an error if the host is not running the latest XS version
export function listMissingPatches({ host }) {
  return this.getXapi(host).listMissingPoolPatchesOnHost(host._xapiId)
}

listMissingPatches.description =
  'return an array of missing new patches in the host'

listMissingPatches.params = {
  host: { type: 'string' },
}

listMissingPatches.resolve = {
  host: ['host', 'host', 'view'],
}

// -------------------------------------------------------------------

export function installPatch({ host, patch: patchUuid }) {
  return this.getXapi(host).installPoolPatchOnHost(patchUuid, host._xapiId)
}

installPatch.description = 'install a patch on an host'

installPatch.params = {
  host: { type: 'string' },
  patch: { type: 'string' },
}

installPatch.resolve = {
  host: ['host', 'host', 'administrate'],
}

// -------------------------------------------------------------------

export function installAllPatches({ host }) {
  return this.getXapi(host).installAllPoolPatchesOnHost(host._xapiId)
}

installAllPatches.description = 'install all the missing patches on a host'

installAllPatches.params = {
  host: { type: 'string' },
}

installAllPatches.resolve = {
  host: ['host', 'host', 'administrate'],
}

// -------------------------------------------------------------------

export function emergencyShutdownHost({ host }) {
  return this.getXapi(host).emergencyShutdownHost(host._xapiId)
}

@@ -101,3 +101,42 @@ runJob.params = {
    type: 'string',
  },
}

export async function list({ remotes }) {
  return this.listMetadataBackups(remotes)
}

list.permission = 'admin'

list.params = {
  remotes: {
    type: 'array',
    items: {
      type: 'string',
    },
  },
}

export function restore({ id }) {
  return this.restoreMetadataBackup(id)
}

restore.permission = 'admin'

restore.params = {
  id: {
    type: 'string',
  },
}

function delete_({ id }) {
  return this.deleteMetadataBackup(id)
}
delete_.permission = 'admin'

delete_.params = {
  id: {
    type: 'string',
  },
}
export { delete_ as delete }

@@ -1,6 +1,4 @@
import { format } from 'json-rpc-peer'
import { differenceBy } from 'lodash'
import { mapToArray } from '../utils'

// ===================================================================

@@ -75,40 +73,43 @@ setPoolMaster.resolve = {

// -------------------------------------------------------------------

export async function installPatch({ pool, patch: patchUuid }) {
  await this.getXapi(pool).installPoolPatchOnAllHosts(patchUuid)
// Returns an array of missing new patches in the host
// Returns an empty array if up-to-date
export function listMissingPatches({ host }) {
  return this.getXapi(host).listMissingPatches(host._xapiId)
}

installPatch.params = {
  pool: {
    type: 'string',
  },
  patch: {
    type: 'string',
  },
listMissingPatches.description =
  'return an array of missing new patches in the host'

listMissingPatches.params = {
  host: { type: 'string' },
}

installPatch.resolve = {
  pool: ['pool', 'pool', 'administrate'],
listMissingPatches.resolve = {
  host: ['host', 'host', 'view'],
}

// -------------------------------------------------------------------

export async function installAllPatches({ pool }) {
  await this.getXapi(pool).installAllPoolPatchesOnAllHosts()
export async function installPatches({ pool, patches, hosts }) {
  await this.getXapi(hosts === undefined ? pool : hosts[0]).installPatches({
    patches,
    hosts,
  })
}

installAllPatches.params = {
  pool: {
    type: 'string',
  },
installPatches.params = {
  pool: { type: 'string', optional: true },
  patches: { type: 'array', optional: true },
  hosts: { type: 'array', optional: true },
}

installAllPatches.resolve = {
installPatches.resolve = {
  pool: ['pool', 'pool', 'administrate'],
}

installAllPatches.description =
  'Install automatically all patches for every hosts of a pool'
installPatches.description = 'Install patches on hosts'

// -------------------------------------------------------------------

@@ -144,6 +145,22 @@ export { uploadPatch as patch }

// -------------------------------------------------------------------

export async function getPatchesDifference({ source, target }) {
  return this.getPatchesDifference(target.id, source.id)
}

getPatchesDifference.params = {
  source: { type: 'string' },
  target: { type: 'string' },
}

getPatchesDifference.resolve = {
  source: ['source', 'host', 'view'],
  target: ['target', 'host', 'view'],
}

// -------------------------------------------------------------------

export async function mergeInto({ source, target, force }) {
  const sourceHost = this.getObject(source.master)
  const targetHost = this.getObject(target.master)

@@ -156,21 +173,21 @@ export async function mergeInto({ source, target, force }) {
    )
  }

  const sourcePatches = sourceHost.patches
  const targetPatches = targetHost.patches
  const counterDiff = differenceBy(sourcePatches, targetPatches, 'name')

  const counterDiff = this.getPatchesDifference(source.master, target.master)
  if (counterDiff.length > 0) {
    throw new Error('host has patches that are not applied on target pool')
    const targetXapi = this.getXapi(target)
    await targetXapi.installPatches({
      patches: await targetXapi.findPatches(counterDiff),
    })
  }

  const diff = differenceBy(targetPatches, sourcePatches, 'name')

  // TODO: compare UUIDs
  await this.getXapi(source).installSpecificPatchesOnHost(
    mapToArray(diff, 'name'),
    sourceHost._xapiId
  )
  const diff = this.getPatchesDifference(target.master, source.master)
  if (diff.length > 0) {
    const sourceXapi = this.getXapi(source)
    await sourceXapi.installPatches({
      patches: await sourceXapi.findPatches(diff),
    })
  }

  await this.mergeXenPools(source._xapiId, target._xapiId, force)
}

@@ -1,16 +1,20 @@
import { deprecate } from 'util'

import { getUserPublicProperties } from '../utils'
import { invalidCredentials } from 'xo-common/api-errors'

// ===================================================================

export async function signIn(credentials) {
  const user = await this.authenticateUser(credentials)
  if (!user) {
    throw invalidCredentials()
  const { session } = this

  const { user, expiration } = await this.authenticateUser(credentials)
  session.set('user_id', user.id)

  if (expiration === undefined) {
    session.unset('expiration')
  } else {
    session.set('expiration', expiration)
  }
  this.session.set('user_id', user.id)

  return getUserPublicProperties(user)
}

@@ -1,8 +1,9 @@
import asyncMap from '@xen-orchestra/async-map'
import { some } from 'lodash'

import ensureArray from '../_ensureArray'
import { asInteger } from '../xapi/utils'
import { ensureArray, forEach, parseXml } from '../utils'
import { forEach, parseXml } from '../utils'

// ===================================================================

@@ -10,8 +10,9 @@ import { invalidParameters } from 'xo-common/api-errors'
import { v4 as generateUuid } from 'uuid'
import { includes, remove, filter, find, range } from 'lodash'

import ensureArray from '../_ensureArray'
import { asInteger } from '../xapi/utils'
import { parseXml, ensureArray } from '../utils'
import { parseXml } from '../utils'

const log = createLogger('xo:xosan')

@@ -15,17 +15,20 @@ import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
import WebServer from 'http-server-plus'
import WebSocket from 'ws'

import { compile as compilePug } from 'pug'
import { createServer as createProxyServer } from 'http-proxy'
import { fromEvent } from 'promise-toolbox'
import { ifDef } from '@xen-orchestra/defined'
import { join as joinPath } from 'path'

import JsonRpcPeer from 'json-rpc-peer'
import { invalidCredentials } from 'xo-common/api-errors'
import { ensureDir, readdir, readFile } from 'fs-extra'

import WebServer from 'http-server-plus'
import parseDuration from './_parseDuration'
import Xo from './xo'
import {
  forEach,

@@ -121,7 +124,7 @@ function createExpressApp(config) {
  return app
}

async function setUpPassport(express, xo) {
async function setUpPassport(express, xo, { authentication: authCfg }) {
  const strategies = { __proto__: null }
  xo.registerPassportStrategy = strategy => {
    passport.use(strategy)

@@ -179,16 +182,24 @@ async function setUpPassport(express, xo) {
    }
  })

  const PERMANENT_VALIDITY = ifDef(
    authCfg.permanentCookieValidity,
    parseDuration
  )
  const SESSION_VALIDITY = ifDef(authCfg.sessionCookieValidity, parseDuration)
  const setToken = async (req, res, next) => {
    const { user, isPersistent } = req.session
    const token = (await xo.createAuthenticationToken({ userId: user.id })).id
    const token = await xo.createAuthenticationToken({
      expiresIn: isPersistent ? PERMANENT_VALIDITY : SESSION_VALIDITY,
      userId: user.id,
    })

    // Persistent cookie ? => 1 year
    // Non-persistent : external provider as Github, Twitter...
    res.cookie(
      'token',
      token,
      isPersistent ? { maxAge: 1000 * 60 * 60 * 24 * 365 } : undefined
      token.id,
      // a session (non-permanent) cookie must not have an expiration date
      // because it must not survive browser restart
      isPersistent ? { expires: new Date(token.expiration) } : undefined
    )

    delete req.session.isPersistent

@@ -240,7 +251,7 @@ async function setUpPassport(express, xo) {
  xo.registerPassportStrategy(
    new LocalStrategy(async (username, password, done) => {
      try {
        const user = await xo.authenticateUser({ username, password })
        const { user } = await xo.authenticateUser({ username, password })
        done(null, user)
      } catch (error) {
        done(null, false, { message: error.message })

@@ -359,6 +370,7 @@ async function makeWebServerListen(
  ;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
  if (opts.key.includes('ENCRYPTED')) {
    opts.passphrase = await new Promise(resolve => {
      // eslint-disable-next-line no-console
      console.log('Encrypted key %s', key)
      process.stdout.write(`Enter pass phrase: `)
      pw(resolve)

@@ -506,6 +518,11 @@ const setUpApi = (webServer, xo, config) => {

    // Connect the WebSocket to the JSON-RPC server.
    socket.on('message', message => {
      const expiration = connection.get('expiration', undefined)
      if (expiration !== undefined && expiration < Date.now()) {
        return void connection.close()
      }

      jsonRpc.write(message)
    })

@@ -553,7 +570,7 @@ const setUpConsoleProxy = (webServer, xo) => {
    {
      const { token } = parseCookies(req.headers.cookie)

      const user = await xo.authenticateUser({ token })
      const { user } = await xo.authenticateUser({ token })
      if (!(await xo.hasPermissions(user.id, [[id, 'operate']]))) {
        throw invalidCredentials()
      }

@@ -573,6 +590,9 @@ const setUpConsoleProxy = (webServer, xo) => {
        proxyConsole(connection, vmConsole, xapi.sessionId)
      })
    } catch (error) {
      try {
        socket.end()
      } catch (_) {}
      console.error((error && error.stack) || error)
    }
  })

@@ -670,7 +690,7 @@ export default async function main(args) {

  // Everything above is not protected by the sign in, allowing xo-cli
  // to work properly.
  await setUpPassport(express, xo)
  await setUpPassport(express, xo, config)

  // Attaches express to the web server.
  webServer.on('request', express)

@@ -14,6 +14,10 @@ export class Remotes extends Collection {
  async get(properties) {
    const remotes = await super.get(properties)
    forEach(remotes, remote => {
      remote.benchmarks =
        remote.benchmarks !== undefined
          ? JSON.parse(remote.benchmarks)
          : undefined
      remote.enabled = remote.enabled === 'true'
    })
    return remotes

@@ -10,7 +10,7 @@ const recoverAccount = async ([name]) => {
xo-server-recover-account <user name or email>

If the user does not exist, it is created, if it exists, updates
its password and resets its permission to Admin.
its password, removes any configured OTP and resets its permission to Admin.
`
}

@@ -32,7 +32,11 @@ xo-server-recover-account <user name or email>

  const user = await xo.getUserByName(name, true)
  if (user !== null) {
    await xo.updateUser(user.id, { password, permission: 'admin' })
    await xo.updateUser(user.id, {
      password,
      permission: 'admin',
      preferences: { otp: null },
    })
    console.log(`user ${name} has been successfully updated`)
  } else {
    await xo.createUser({ name, password, permission: 'admin' })

@@ -3,7 +3,6 @@ import forEach from 'lodash/forEach'
import has from 'lodash/has'
import highland from 'highland'
import humanFormat from 'human-format'
import isArray from 'lodash/isArray'
import isString from 'lodash/isString'
import keys from 'lodash/keys'
import multiKeyHashInt from 'multikey-hash'

@@ -49,17 +48,6 @@ export const diffItems = (coll1, coll2) => {

// -------------------------------------------------------------------

// Ensure the value is an array, wrap it if necessary.
export function ensureArray(value) {
  if (value === undefined) {
    return []
  }

  return isArray(value) ? value : [value]
}

// -------------------------------------------------------------------

// Returns the value of a property and removes it from the object.
export function extractProperty(obj, prop) {
  const value = obj[prop]

@@ -3,7 +3,6 @@

import {
  camelToSnakeCase,
  diffItems,
  ensureArray,
  extractProperty,
  formatXml,
  generateToken,

@@ -42,26 +41,6 @@ describe('diffItems', () => {

// -------------------------------------------------------------------

describe('ensureArray()', function() {
  it('wrap the value in an array', function() {
    const value = 'foo'

    expect(ensureArray(value)).toEqual([value])
  })

  it('returns an empty array for undefined', function() {
    expect(ensureArray(undefined)).toEqual([])
  })

  it('returns the object itself if is already an array', function() {
    const array = ['foo', 'bar', 'baz']

    expect(ensureArray(array)).toBe(array)
  })
})

// -------------------------------------------------------------------

describe('extractProperty()', function() {
  it('returns the value of the property', function() {
    const value = {}

@@ -1,7 +1,7 @@
import { startsWith } from 'lodash'

import ensureArray from './_ensureArray'
import {
  ensureArray,
  extractProperty,
  forEach,
  isArray,

@@ -102,11 +102,10 @@ const TRANSFORMS = {
    } = obj

    const isRunning = isHostRunning(obj)
    let supplementalPacks, patches
    let supplementalPacks

    if (useUpdateSystem(obj)) {
      supplementalPacks = []
      patches = []

      forEach(obj.$updates, update => {
        const formattedUpdate = {

@@ -121,7 +120,7 @@ const TRANSFORMS = {
        }

        if (startsWith(update.name_label, 'XS')) {
          patches.push(formattedUpdate)
          // It's a patch update but for homogeneity, we're still using pool_patches
        } else {
          supplementalPacks.push(formattedUpdate)
        }

@@ -171,7 +170,7 @@ const TRANSFORMS = {
        }
      })(),
      multipathing: otherConfig.multipathing === 'true',
      patches: patches || link(obj, 'patches'),
      patches: link(obj, 'patches'),
      powerOnMode: obj.power_on_mode,
      power_state: metrics ? (isRunning ? 'Running' : 'Halted') : 'Unknown',
      startTime: toTimestamp(otherConfig.boot_time),

@@ -625,10 +624,18 @@ const TRANSFORMS = {
  // -----------------------------------------------------------------

  host_patch(obj) {
    const poolPatch = obj.$pool_patch
    return {
      type: 'patch',

      applied: Boolean(obj.applied),
      enforceHomogeneity: poolPatch.pool_applied,
      description: poolPatch.name_description,
      name: poolPatch.name_label,
      pool_patch: poolPatch.$ref,
      size: poolPatch.size,
      guidance: poolPatch.after_apply_guidance,
      time: toTimestamp(obj.timestamp_applied),
      pool_patch: link(obj, 'pool_patch', '$ref'),

      $host: link(obj, 'host'),
    }

@@ -640,12 +647,15 @@ const TRANSFORMS = {
    return {
      id: obj.$ref,

      applied: Boolean(obj.pool_applied),
      dataUuid: obj.uuid, // UUID of the patch file as stated in Citrix's XML file
      description: obj.name_description,
      guidance: obj.after_apply_guidance,
      name: obj.name_label,
      size: +obj.size,
      uuid: obj.uuid,
      uuid: obj.$ref,

      // TODO: means that the patch must be applied on every host
      // applied: Boolean(obj.pool_applied),

      // TODO: what does it mean, should we handle it?
      // version: obj.version,

@@ -3,10 +3,10 @@ import limitConcurrency from 'limit-concurrency-decorator'
import synchronized from 'decorator-synchronized'
import { BaseError } from 'make-error'
import {
  defaults,
  endsWith,
  findKey,
  forEach,
  get,
  identity,
  map,
  mapValues,
@@ -52,11 +52,6 @@ const RRD_POINTS_PER_STEP = {
// Utils
// -------------------------------------------------------------------

// Return current local timestamp in seconds
function getCurrentTimestamp() {
  return Date.now() / 1000
}

function convertNanToNull(value) {
  return isNaN(value) ? null : value
}
@@ -78,23 +73,8 @@ const computeValues = (dataRow, legendIndex, transformValue = identity) =>
const combineStats = (stats, path, combineValues) =>
  zipWith(...map(stats, path), (...values) => combineValues(values))

// Browses the object in depth and initialises its properties
// The targetPath can be a string or an array describing the depth
// targetPath: [a, b, c] => a.b.c
const getValuesFromDepth = (obj, targetPath) => {
  if (typeof targetPath === 'string') {
    return (obj[targetPath] = [])
  }

  forEach(targetPath, (path, key) => {
    if (obj[path] === undefined) {
      obj = obj[path] = targetPath.length - 1 === key ? [] : {}
      return
    }
    obj = obj[path]
  })
  return obj
}
const createGetProperty = (obj, property, defaultValue) =>
  defaults(obj, { [property]: defaultValue })[property]

const testMetric = (test, type) =>
  typeof test === 'string'
@@ -297,45 +277,26 @@ export default class XapiStats {
      .then(response => response.readAll().then(JSON5.parse))
  }

  async _getNextTimestamp(xapi, host, step) {
    const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)
    const maxDuration = step * RRD_POINTS_PER_STEP[step]
    const lastTimestamp = get(this._statsByObject, [
      host.uuid,
      step,
      'endTimestamp',
    ])
  // To avoid multiple requests, we keep a cache for the stats and
  // only return it if we do not exceed a step
  _getCachedStats(uuid, step, currentTimeStamp) {
    const statsByObject = this._statsByObject

    if (
      lastTimestamp === undefined ||
      currentTimeStamp - lastTimestamp + step > maxDuration
    ) {
      return currentTimeStamp - maxDuration + step
    }
    return lastTimestamp
  }

  _getStats(hostUuid, step, vmUuid) {
    const hostStats = this._statsByObject[hostUuid][step]

    // Return host stats
    if (vmUuid === undefined) {
      return {
        interval: step,
        ...hostStats,
      }
    const stats = statsByObject[uuid]?.[step]
    if (stats === undefined) {
      return
    }

    // Return vm stats
    return {
      interval: step,
      endTimestamp: hostStats.endTimestamp,
      ...this._statsByObject[vmUuid][step],
    if (stats.endTimestamp + step < currentTimeStamp) {
      delete statsByObject[uuid][step]
      return
    }

    return stats
  }
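
The new `_getCachedStats` replaces the `_getNextTimestamp`/`_getStats` pair with a single lookup keyed on `(uuid, step)` that evicts entries once more than one step has elapsed. A minimal standalone sketch of that eviction rule follows; the class and method names here are illustrative, not part of xo-server:

// Minimal sketch of the caching rule above: an entry is served only while
// `endTimestamp + step` is still in the future, otherwise it is evicted.
class StatsCache {
  constructor() {
    this._byObject = { __proto__: null }
  }

  set(uuid, step, stats) {
    const byStep = this._byObject[uuid] ?? (this._byObject[uuid] = {})
    byStep[step] = stats
  }

  // currentTimestamp is in seconds, like the server timestamps above
  get(uuid, step, currentTimestamp) {
    const stats = this._byObject[uuid]?.[step]
    if (stats === undefined) {
      return
    }
    // stale: more than one step elapsed since the last fetched point
    if (stats.endTimestamp + step < currentTimestamp) {
      delete this._byObject[uuid][step]
      return
    }
    return stats
  }
}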

  @synchronized.withKey((_, { host }) => host.uuid)
  async _getAndUpdateStats(xapi, { host, vmUuid, granularity }) {
  async _getAndUpdateStats(xapi, { host, uuid, granularity }) {
    const step =
      granularity === undefined
        ? RRD_STEP_SECONDS
@@ -347,103 +308,93 @@ export default class XapiStats {
      )
    }

    // Limit the number of http requests
    const hostUuid = host.uuid
    const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)

    if (
      !(
        vmUuid !== undefined &&
        get(this._statsByObject, [vmUuid, step]) === undefined
      ) &&
      get(this._statsByObject, [hostUuid, step, 'localTimestamp']) + step >
        getCurrentTimestamp()
    ) {
      return this._getStats(hostUuid, step, vmUuid)
    const stats = this._getCachedStats(uuid, step, currentTimeStamp)
    if (stats !== undefined) {
      return stats
    }

    const timestamp = await this._getNextTimestamp(xapi, host, step)
    const json = await this._getJson(xapi, host, timestamp, step)
    if (json.meta.step !== step) {
    const maxDuration = step * RRD_POINTS_PER_STEP[step]

    // To avoid crossing over the boundary, we ask for one less step
    const optimumTimestamp = currentTimeStamp - maxDuration + step
    const json = await this._getJson(xapi, host, optimumTimestamp, step)

    const actualStep = json.meta.step
    if (json.data.length > 0) {
      // fetched data is organized from the newest to the oldest
      // but this implementation requires it in the other direction
      json.data.reverse()
      json.meta.legend.forEach((legend, index) => {
        const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
          legend
        )

        const metrics = STATS[type]
        if (metrics === undefined) {
          return
        }

        const { metric, testResult } = findMetric(metrics, metricType)
        if (metric === undefined) {
          return
        }

        const xoObjectStats = createGetProperty(this._statsByObject, uuid, {})
        let stepStats = xoObjectStats[actualStep]
        if (
          stepStats === undefined ||
          stepStats.endTimestamp !== json.meta.end
        ) {
          stepStats = xoObjectStats[actualStep] = {
            endTimestamp: json.meta.end,
            interval: actualStep,
          }
        }

        const path =
          metric.getPath !== undefined
            ? metric.getPath(testResult)
            : [findKey(metrics, metric)]

        const lastKey = path.length - 1
        let metricStats = createGetProperty(stepStats, 'stats', {})
        path.forEach((property, key) => {
          if (key === lastKey) {
            metricStats[property] = computeValues(
              json.data,
              index,
              metric.transformValue
            )
            return
          }

          metricStats = createGetProperty(metricStats, property, {})
        })
      })
    }

    if (actualStep !== step) {
      throw new FaultyGranularity(
        `Unable to get the true granularity: ${json.meta.step}`
        `Unable to get the true granularity: ${actualStep}`
      )
    }

    // There is data
    if (json.data.length !== 0) {
      // Warning: sometimes the json.xport.meta.start value does not match the
      // timestamp of the oldest data value,
      // so we use the timestamp of the oldest data value!
      const startTimestamp = json.data[json.meta.rows - 1].t
      const endTimestamp = get(this._statsByObject, [
        hostUuid,
        step,
        'endTimestamp',
      ])

      const statsOffset = endTimestamp - startTimestamp + step
      if (endTimestamp !== undefined && statsOffset > 0) {
        const parseOffset = statsOffset / step
        // Remove useless data
        // Note: older values are at the end of json.data.row
        json.data.splice(json.data.length - parseOffset)
    return (
      this._statsByObject[uuid]?.[step] ?? {
        endTimestamp: currentTimeStamp,
        interval: step,
        stats: {},
      }

      // There is useful data
      if (json.data.length > 0) {
        // reorder data
        json.data.reverse()
        forEach(json.meta.legend, (legend, index) => {
          const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
            legend
          )

          const metrics = STATS[type]
          if (metrics === undefined) {
            return
          }

          const { metric, testResult } = findMetric(metrics, metricType)

          if (metric === undefined) {
            return
          }

          const path =
            metric.getPath !== undefined
              ? metric.getPath(testResult)
              : [findKey(metrics, metric)]

          const metricValues = getValuesFromDepth(this._statsByObject, [
            uuid,
            step,
            'stats',
            ...path,
          ])

          metricValues.push(
            ...computeValues(json.data, index, metric.transformValue)
          )

          // remove older values
          metricValues.splice(
            0,
            metricValues.length - RRD_POINTS_PER_STEP[step]
          )
        })
      }
    }

    // Update timestamp
    const hostStats = this._statsByObject[hostUuid][step]
    hostStats.endTimestamp = json.meta.end
    hostStats.localTimestamp = getCurrentTimestamp()
    return this._getStats(hostUuid, step, vmUuid)
    )
  }

  getHostStats(xapi, hostId, granularity) {
    const host = xapi.getObject(hostId)
    return this._getAndUpdateStats(xapi, {
      host: xapi.getObject(hostId),
      host,
      uuid: host.uuid,
      granularity,
    })
  }
@@ -457,7 +408,7 @@ export default class XapiStats {

    return this._getAndUpdateStats(xapi, {
      host,
      vmUuid: vm.uuid,
      uuid: vm.uuid,
      granularity,
    })
  }

@@ -35,11 +35,11 @@ import {
import { satisfies as versionSatisfies } from 'semver'

import createSizeStream from '../size-stream'
import ensureArray from '../_ensureArray'
import fatfsBuffer, { init as fatfsBufferInit } from '../fatfs-buffer'
import pRetry from '../_pRetry'
import {
  camelToSnakeCase,
  ensureArray,
  forEach,
  isFunction,
  map,
@@ -68,6 +68,7 @@ import {
  parseDateTime,
  prepareXapiParam,
} from './utils'
import { createVhdStreamWithLength } from 'vhd-lib'

const log = createLogger('xo:xapi')

@@ -93,8 +94,10 @@ export const IPV6_CONFIG_MODES = ['None', 'DHCP', 'Static', 'Autoconf']

@mixin(mapToArray(mixins))
export default class Xapi extends XapiBase {
  constructor(...args) {
    super(...args)
  constructor({ guessVhdSizeOnImport, ...opts }) {
    super(opts)

    this._guessVhdSizeOnImport = guessVhdSizeOnImport

    // Patch getObject to resolve _xapiId property.
    this.getObject = (getObject => (...args) => {
@@ -1564,47 +1567,28 @@ export default class Xapi extends XapiBase {
      }`
    )

    // see https://github.com/vatesfr/xen-orchestra/issues/4074
    const snapshotNameLabelPrefix = `Snapshot of ${vm.uuid} [`
    ignoreErrors.call(
      Promise.all(
        vm.snapshots.map(async ref => {
          const nameLabel = await this.getField('VM', ref, 'name_label')
          if (nameLabel.startsWith(snapshotNameLabelPrefix)) {
            return this._deleteVm(ref)
          }
        })
      )
    )

    let ref
    do {
      if (!vm.tags.includes('xo-disable-quiesce')) {
        try {
          ref = await pRetry(
            async bail => {
              try {
                return await this.callAsync(
                  $cancelToken,
                  'VM.snapshot_with_quiesce',
                  vmRef,
                  nameLabel
                )
              } catch (error) {
                if (error?.code !== 'VM_SNAPSHOT_WITH_QUIESCE_FAILED') {
                  throw bail(error)
                }

                // detect and remove new broken snapshots
                //
                // see https://github.com/vatesfr/xen-orchestra/issues/3936
                const prevSnapshotRefs = new Set(vm.snapshots)
                const snapshotNameLabelPrefix = `Snapshot of ${vm.uuid} [`
                vm.snapshots = await this.getField('VM', vmRef, 'snapshots')
                const createdSnapshots = (await this.getRecords(
                  'VM',
                  vm.snapshots.filter(_ => !prevSnapshotRefs.has(_))
                )).filter(_ => _.name_label.startsWith(snapshotNameLabelPrefix))

                // be safe: only delete if there was a single match
                if (createdSnapshots.length === 1) {
                  ignoreErrors.call(this._deleteVm(createdSnapshots[0]))
                }

                throw error
              }
            },
            {
              delay: 60e3,
              tries: 3,
            }
          ref = await this.callAsync(
            $cancelToken,
            'VM.snapshot_with_quiesce',
            vmRef,
            nameLabel
          ).then(extractOpaqueRef)
          ignoreErrors.call(this.call('VM.add_tags', ref, 'quiesce'))
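
The removed block shows the retry/bail idiom this codebase uses with `pRetry`: retry `VM.snapshot_with_quiesce` up to 3 times with a 60 s delay, but abort immediately on any error other than `VM_SNAPSHOT_WITH_QUIESCE_FAILED`. A hedged sketch of just that idiom, with `pRetry` passed in and `snapshotWithQuiesce` standing in for the XAPI call (both parameters are assumptions, not a real API):

// Throwing `bail(error)` inside the retried function aborts the remaining
// attempts; rethrowing the error lets pRetry schedule another try.
async function snapshotWithRetry(snapshotWithQuiesce, pRetry) {
  return pRetry(
    async bail => {
      try {
        return await snapshotWithQuiesce()
      } catch (error) {
        if (error?.code !== 'VM_SNAPSHOT_WITH_QUIESCE_FAILED') {
          // non-retryable: abort immediately
          throw bail(error)
        }
        // retryable: rethrow so another attempt is scheduled
        throw error
      }
    },
    { delay: 60e3, tries: 3 }
  )
}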

@@ -2095,11 +2079,16 @@ export default class Xapi extends XapiBase {
  // -----------------------------------------------------------------

  async _importVdiContent(vdi, body, format = VDI_FORMAT_VHD) {
    if (__DEV__ && body.length == null) {
      throw new Error(
        'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.'
      )
    if (typeof body.pipe === 'function' && body.length === undefined) {
      if (this._guessVhdSizeOnImport && format === VDI_FORMAT_VHD) {
        body = await createVhdStreamWithLength(body)
      } else if (__DEV__) {
        throw new Error(
          'Trying to import a VDI without a length field. Please report this error to Xen Orchestra.'
        )
      }
    }
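
With `guessVhdSizeOnImport` enabled, a VHD stream whose `length` is unknown is wrapped by `createVhdStreamWithLength` from `vhd-lib` (the import used in the diff above), which reads the VHD structure to compute the total size before the HTTP upload. A hedged usage sketch of that call site, assuming a readable `vhdStream`:

import { createVhdStreamWithLength } from 'vhd-lib'

// Wrap a length-less VHD stream so the upload can send a known size.
async function withKnownLength(vhdStream) {
  if (vhdStream.length !== undefined) {
    return vhdStream
  }
  const stream = await createVhdStreamWithLength(vhdStream)
  // stream.length is now set, so a Content-Length header can be sent
  return stream
}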

    await Promise.all([
      body.task,
      body.checksumVerified,
@@ -2378,8 +2367,6 @@ export default class Xapi extends XapiBase {
  }

  // Generic Config Drive
  //
  // https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
  @deferrable
  async createCloudInitConfigDrive(
    $defer,
@@ -2401,13 +2388,32 @@ export default class Xapi extends XapiBase {
    $defer.onFailure(() => this._deleteVdi(vdi.$ref))

    // Then, generate a FAT fs
    const fs = promisifyAll(fatfs.createFileSystem(fatfsBuffer(buffer)))
    const { mkdir, writeFile } = promisifyAll(
      fatfs.createFileSystem(fatfsBuffer(buffer))
    )

    await Promise.all([
      fs.writeFile('meta-data', 'instance-id: ' + vm.uuid + '\n'),
      fs.writeFile('user-data', userConfig),
      networkConfig !== undefined &&
        fs.writeFile('network-config', networkConfig),
      // preferred datasource: NoCloud
      //
      // https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
      writeFile('meta-data', 'instance-id: ' + vm.uuid + '\n'),
      writeFile('user-data', userConfig),
      networkConfig !== undefined && writeFile('network-config', networkConfig),

      // fallback datasource: Config Drive 2
      //
      // https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html#version-2
      mkdir('openstack').then(() =>
        mkdir('openstack/latest').then(() =>
          Promise.all([
            writeFile(
              'openstack/latest/meta_data.json',
              JSON.stringify({ uuid: vm.uuid })
            ),
            writeFile('openstack/latest/user_data', userConfig),
          ])
        )
      ),
    ])

    // ignore errors, I (JFT) don't understand why they are emitted
@@ -2466,6 +2472,15 @@ export default class Xapi extends XapiBase {
    )
  }

  // Main purpose: upload update on VDI
  // Is a local SR on a non-master host OK?
  findAvailableSr(minSize) {
    return find(
      this.objects.all,
      obj => obj.$type === 'SR' && canSrHaveNewVdiOfSize(obj, minSize)
    )
  }

  async _assertConsistentHostServerTime(hostRef) {
    const delta =
      parseDateTime(await this.call('host.get_servertime', hostRef)).getTime() -

@@ -1,31 +1,44 @@
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import deferrable from 'golike-defer'
import every from 'lodash/every'
import filter from 'lodash/filter'
import find from 'lodash/find'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
import unzip from 'julien-f-unzip'
import { filter, find, pickBy, some } from 'lodash'

import ensureArray from '../../_ensureArray'
import { debounce } from '../../decorators'
import {
  ensureArray,
  forEach,
  mapFilter,
  mapToArray,
  parseXml,
} from '../../utils'
import { forEach, mapFilter, mapToArray, parseXml } from '../../utils'

import { extractOpaqueRef, useUpdateSystem } from '../utils'

// TOC -------------------------------------------------------------------------

// # HELPERS
//    _isXcp
//    _ejectToolsIsos
//    _getXenUpdates           Map of Objects
// # LIST
//    _listXcpUpdates          XCP available updates - Array of Objects
//    _listPatches             XS patches (installed or not) - Map of Objects
//    _listInstalledPatches    XS installed patches on the host - Map of Booleans
//    _listInstallablePatches  XS (host, requested patches) → sorted patches that are not installed and not conflicting - Array of Objects
//    listMissingPatches       HL: installable patches (XS) or updates (XCP) - Array of Objects
//    findPatches              HL: get XS patches IDs from names
// # INSTALL
//    _xcpUpdate               XCP yum update
//    _legacyUploadPatch       XS legacy upload
//    _uploadPatch             XS upload on a dedicated VDI
//    installPatches           HL: install patches (XS) or yum update (XCP) on hosts

// HELPERS ---------------------------------------------------------------------

const log = createLogger('xo:xapi')

const _isXcp = host => host.software_version.product_brand === 'XCP-ng'

// =============================================================================

export default {
  // raw { uuid: patch } map translated from updates.xensource.com/XenServer/updates.xml
  // FIXME: should be static
  @debounce(24 * 60 * 60 * 1000)
  async _getXenUpdates() {
@@ -48,13 +61,16 @@ export default {
          guidance: patch['after-apply-guidance'],
          name: patch['name-label'],
          url: patch['patch-url'],
          id: patch.uuid,
          uuid: patch.uuid,
          conflicts: mapToArray(ensureArray(patch.conflictingpatches), patch => {
            return patch.conflictingpatch.uuid
          }),
          requirements: mapToArray(ensureArray(patch.requiredpatches), patch => {
            return patch.requiredpatch.uuid
          }),
          conflicts: mapToArray(
            ensureArray(patch.conflictingpatches),
            patch => patch.conflictingpatch.uuid
          ),
          requirements: mapToArray(
            ensureArray(patch.requiredpatches),
            patch => patch.requiredpatch.uuid
          ),
          paid: patch['update-stream'] === 'premium',
          upgrade: /^XS\d{2,}$/.test(patch['name-label']),
          // TODO: what does it mean, should we handle it?
@@ -101,72 +117,12 @@ export default {
    }
  },

  // =================================================================

  // Returns installed and not installed patches for a given host.
  async _getPoolPatchesForHost(host) {
    const versions = (await this._getXenUpdates()).versions

    const hostVersions = host.software_version
    const version =
      versions[hostVersions.product_version] ||
      versions[hostVersions.product_version_text]

    return version ? version.patches : []
  },

  _getInstalledPoolPatchesOnHost(host) {
    const installed = { __proto__: null }

    // platform_version < 2.1.1
    forEach(host.$patches, hostPatch => {
      installed[hostPatch.$pool_patch.uuid] = true
    })

    // platform_version >= 2.1.1
    forEach(host.$updates, update => {
      installed[update.uuid] = true // TODO: ignore packs
    })

    return installed
  },

  async _listMissingPoolPatchesOnHost(host) {
    const all = await this._getPoolPatchesForHost(host)
    const installed = this._getInstalledPoolPatchesOnHost(host)

    const installable = { __proto__: null }
    forEach(all, (patch, uuid) => {
      if (installed[uuid]) {
        return
      }

      for (const uuid of patch.conflicts) {
        if (uuid in installed) {
          return
        }
      }

      installable[uuid] = patch
    })

    return installable
  },

  async listMissingPoolPatchesOnHost(hostId) {
    const host = this.getObject(hostId)
    // Returns an array to not break compatibility.
    return mapToArray(
      await (host.software_version.product_brand === 'XCP-ng'
        ? this._xcpListHostUpdates(host)
        : this._listMissingPoolPatchesOnHost(host))
    )
  },

  // eject all ISOs from all the host's VMs when installing patches
  // if hostRef is not specified: eject ISOs on all the pool's VMs
  async _ejectToolsIsos(hostRef) {
    return Promise.all(
      mapFilter(this.objects.all, vm => {
        if (vm.$type !== 'VM' || (hostRef && vm.resident_on !== hostRef)) {
        if (vm.$type !== 'vm' || (hostRef && vm.resident_on !== hostRef)) {
          return
        }

@@ -183,54 +139,235 @@ export default {
    )
  },

  // -----------------------------------------------------------------
  // LIST ----------------------------------------------------------------------

  _isPoolPatchInstallableOnHost(patchUuid, host) {
    const installed = this._getInstalledPoolPatchesOnHost(host)
  // list all yum updates available for a XCP-ng host
  // (hostObject) → { uuid: patchObject }
  async _listXcpUpdates(host) {
    return JSON.parse(
      await this.call(
        'host.call_plugin',
        host.$ref,
        'updater.py',
        'check_update',
        {}
      )
    )
  },

    if (installed[patchUuid]) {
      return false
  // list all patches provided by Citrix for this host version, regardless
  // of whether they're installed or not
  // ignores upgrade patches
  // (hostObject) → { uuid: patchObject }
  async _listPatches(host) {
    const versions = (await this._getXenUpdates()).versions

    const hostVersions = host.software_version
    const version =
      versions[hostVersions.product_version] ||
      versions[hostVersions.product_version_text]

    return version ? pickBy(version.patches, patch => !patch.upgrade) : {}
  },

  // list patches installed on the host
  // (hostObject) → { uuid: boolean }
  _listInstalledPatches(host) {
    const installed = { __proto__: null }

    // Legacy XS patches
    if (!useUpdateSystem(host)) {
      forEach(host.$patches, hostPatch => {
        installed[hostPatch.$pool_patch.uuid] = true
      })
      return installed
    }
    // ----------

    let installable = true

    forEach(installed, patch => {
      if (includes(patch.conflicts, patchUuid)) {
        installable = false

        return false
    forEach(host.$updates, update => {
      // ignore packs
      if (update.name_label.startsWith('XS')) {
        installed[update.uuid] = true
      }
    })

    return installed
  },

  // TODO: handle upgrade patches
  // (hostObject, [ patchId ]) → [ patchObject ]
  async _listInstallablePatches(host, requestedPatches) {
    const all = await this._listPatches(host)
    const installed = this._listInstalledPatches(host)

    let getAll = false
    if (requestedPatches === undefined) {
      getAll = true
      requestedPatches = Object.keys(all)
    }
    const freeHost = this.pool.$master.license_params.sku_type === 'free'
    // We assume:
    // - no conflict transitivity (if A conflicts with B and B with C, Citrix should tell us explicitly that A conflicts with C)
    // - no requirements transitivity (if A requires B and B requires C, Citrix should tell us explicitly that A requires C)
    // - sorted requirements (if A requires B and C, then C cannot require B)
    // For each requested patch:
    // - throw if not found
    // - throw if already installed
    // - ignore if already in installable (may have been added because of requirements)
    // - if paid patch on free host: either ignore (listing all the patches) or throw (patch is requested)
    // - throw if conflicting patches installed
    // - throw if conflicting patches in installable
    // - throw if one of the requirements is not found
    // - push its required patches in installable
    // - push it in installable
    const installable = []
    forEach(requestedPatches, id => {
      const patch = all[id]
      if (patch === undefined) {
        throw new Error(`patch not found: ${id}`)
      }

      if (installed[id] !== undefined) {
        if (getAll) {
          return
        }
        throw new Error(`patch already installed: ${patch.name} (${id})`)
      }

      if (find(installable, { id }) !== undefined) {
        return
      }

      if (patch.paid && freeHost) {
        if (getAll) {
          return
        }
        throw new Error(
          `requested patch ${patch.name} (${id}) requires a XenServer license`
        )
      }

      let conflictId
      if (
        (conflictId = find(
          patch.conflicts,
          conflictId => installed[conflictId] !== undefined
        )) !== undefined
      ) {
        if (getAll) {
          log(
            `patch ${
              patch.name
            } (${id}) conflicts with installed patch ${conflictId}`
          )
          return
        }
        throw new Error(
          `patch ${
            patch.name
          } (${id}) conflicts with installed patch ${conflictId}`
        )
      }

      if (
        (conflictId = find(patch.conflicts, conflictId =>
          find(installable, { id: conflictId })
        )) !== undefined
      ) {
        if (getAll) {
          log(`patches ${id} and ${conflictId} conflict with each other`)
          return
        }
        throw new Error(
          `patches ${id} and ${conflictId} conflict with each other`
        )
      }

      // add requirements
      forEach(patch.requirements, id => {
        const requiredPatch = all[id]
        if (requiredPatch === undefined) {
          throw new Error(`required patch ${id} not found`)
        }
        if (!installed[id] && find(installable, { id }) === undefined) {
          if (requiredPatch.paid && freeHost) {
            throw new Error(
              `required patch ${
                requiredPatch.name
              } (${id}) requires a XenServer license`
            )
          }
          installable.push(requiredPatch)
        }
      })

      // add itself
      installable.push(patch)
    })

    return installable
  },
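
The loop above pushes each requirement before the patch that needs it, which is only sound because of the stated no-transitivity assumptions (one level of requirements is enough). A reduced sketch of just that ordering rule; `patches` maps id → `{ id, requirements: [id] }`, and the conflict and license checks are left out for brevity:

function orderWithRequirements(patches, requestedIds) {
  const installable = []
  const seen = new Set()
  for (const id of requestedIds) {
    const patch = patches[id]
    if (patch === undefined) {
      throw new Error(`patch not found: ${id}`)
    }
    // requirements first (a single level, per the assumptions above)
    for (const reqId of patch.requirements) {
      const required = patches[reqId]
      if (required === undefined) {
        throw new Error(`required patch ${reqId} not found`)
      }
      if (!seen.has(reqId)) {
        seen.add(reqId)
        installable.push(required)
      }
    }
    // then the patch itself, skipping duplicates
    if (!seen.has(id)) {
      seen.add(id)
      installable.push(patch)
    }
  }
  return installable
}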

  _isPoolPatchInstallableOnPool(patchUuid) {
    return every(
      this.objects.all,
      obj =>
        obj.$type !== 'host' ||
        this._isPoolPatchInstallableOnHost(patchUuid, obj)
  // high level
  listMissingPatches(hostId) {
    const host = this.getObject(hostId)
    return _isXcp(host)
      ? this._listXcpUpdates(host)
      : // TODO: list paid patches of free hosts as well so the UI can show them
        this._listInstallablePatches(host)
  },

  // convenient method to find which patches should be installed from a
  // list of patch names
  // e.g.: compare the installed patches of 2 hosts by their
  // names (XS..E...) then find the patches' global IDs
  // [ names ] → [ IDs ]
  async findPatches(names) {
    const all = await this._listPatches(this.pool.$master)
    return filter(all, patch => names.includes(patch.name)).map(
      patch => patch.id
    )
  },

  // -----------------------------------------------------------------
  // INSTALL -------------------------------------------------------------------

  // platform_version < 2.1.1 ----------------------------------------
  async uploadPoolPatch(stream, patchName) {
    const patchRef = await this.putResource(stream, '/pool_patch_upload', {
      task: this.createTask('Patch upload', patchName),
    }).then(extractOpaqueRef)
  _xcpUpdate(hosts) {
    if (hosts === undefined) {
      hosts = filter(this.objects.all, { $type: 'host' })
    } else {
      hosts = filter(
        this.objects.all,
        obj => obj.$type === 'host' && hosts.includes(obj.$id)
      )
    }

    return this._getOrWaitObject(patchRef)
    return asyncMap(hosts, async host => {
      const update = await this.call(
        'host.call_plugin',
        host.$ref,
        'updater.py',
        'update',
        {}
      )

      if (JSON.parse(update).exit !== 0) {
        throw new Error('Update install failed')
      } else {
        await this._updateObjectMapProperty(host, 'other_config', {
          rpm_patch_installation_time: String(Date.now() / 1000),
        })
      }
    })
  },

  async _getOrUploadPoolPatch(uuid) {
  // Legacy XS patches: upload a patch on a pool before installing it
  async _legacyUploadPatch(uuid) {
    // check if the patch has already been uploaded
    try {
      return this.getObjectByUuid(uuid)
    } catch (error) {}
    } catch (e) {}

    log.debug(`downloading patch ${uuid}`)
    log.debug(`legacy downloading patch ${uuid}`)

    const patchInfo = (await this._getXenUpdates()).patches[uuid]
    if (!patchInfo) {
@@ -253,16 +390,21 @@ export default {
        .on('error', reject)
    })

    return this.uploadPoolPatch(stream, patchInfo.name)
    const patchRef = await this.putResource(stream, '/pool_patch_upload', {
      task: this.createTask('Patch upload', patchInfo.name),
    }).then(extractOpaqueRef)

    return this._getOrWaitObject(patchRef)
  },
  // ----------

  // platform_version >= 2.1.1 ----------------------------------------
  async _getUpdateVdi($defer, patchUuid, hostId) {
    log.debug(`downloading patch ${patchUuid}`)
  // upload patch on a VDI on a shared SR
  async _uploadPatch($defer, uuid) {
    log.debug(`downloading patch ${uuid}`)

    const patchInfo = (await this._getXenUpdates()).patches[patchUuid]
    const patchInfo = (await this._getXenUpdates()).patches[uuid]
    if (!patchInfo) {
      throw new Error('no such patch ' + patchUuid)
      throw new Error('no such patch ' + uuid)
    }

    let stream = await this.xo.httpRequest(patchInfo.url)
@@ -276,315 +418,104 @@ export default {
        .on('error', reject)
    })

    let vdi

    // If no hostId provided, try and find a shared SR
    if (!hostId) {
      const sr = this.findAvailableSharedSr(stream.length)

      if (!sr) {
        return
      }

      vdi = await this.createTemporaryVdiOnSr(
        stream,
        sr,
        '[XO] Patch ISO',
        'small temporary VDI to store a patch ISO'
      )
    } else {
      vdi = await this.createTemporaryVdiOnHost(
        stream,
        hostId,
        '[XO] Patch ISO',
        'small temporary VDI to store a patch ISO'
      )
    const sr = this.findAvailableSr(stream.length)
    if (sr === undefined) {
      return
    }

    const vdi = await this.createTemporaryVdiOnSr(
      stream,
      sr,
      '[XO] Patch ISO',
      'small temporary VDI to store a patch ISO'
    )
    $defer(() => this._deleteVdi(vdi.$ref))

    return vdi
  },

  // -----------------------------------------------------------------
  _poolWideInstall: deferrable(async function($defer, patches) {
    // Legacy XS patches
    if (!useUpdateSystem(this.pool.$master)) {
      // for each patch: pool_patch.pool_apply
      for (const p of patches) {
        const [patch] = await Promise.all([
          this._legacyUploadPatch(p.uuid),
          this._ejectToolsIsos(this.pool.$master.$ref),
        ])

  // platform_version < 2.1.1 -----------------------------------------
  async _installPoolPatchOnHost(patchUuid, host) {
    const [patch] = await Promise.all([
      this._getOrUploadPoolPatch(patchUuid),
      this._ejectToolsIsos(host.$ref),
    ])

    await this.call('pool_patch.apply', patch.$ref, host.$ref)
  },

  // platform_version >= 2.1.1
  _installPatchUpdateOnHost: deferrable(async function(
    $defer,
    patchUuid,
    host
  ) {
    await this._assertConsistentHostServerTime(host.$ref)

    const [vdi] = await Promise.all([
      this._getUpdateVdi($defer, patchUuid, host.$id),
      this._ejectToolsIsos(host.$ref),
    ])

    const updateRef = await this.call('pool_update.introduce', vdi.$ref)
    // TODO: check update status
    // const precheck = await this.call('pool_update.precheck', updateRef, host.$ref)
    // - ok_livepatch_complete   An applicable live patch exists for every required component
    // - ok_livepatch_incomplete An applicable live patch exists but it is not sufficient
    // - ok                      There is no applicable live patch
    return this.call('pool_update.apply', updateRef, host.$ref)
  }),

  // -----------------------------------------------------------------

  async installPoolPatchOnHost(patchUuid, host) {
    log.debug(`installing patch ${patchUuid}`)
    if (!isObject(host)) {
      host = this.getObject(host)
    }

    return useUpdateSystem(host)
      ? this._installPatchUpdateOnHost(patchUuid, host)
      : this._installPoolPatchOnHost(patchUuid, host)
  },

  // -----------------------------------------------------------------

  // platform_version < 2.1.1
  async _installPoolPatchOnAllHosts(patchUuid) {
    const [patch] = await Promise.all([
      this._getOrUploadPoolPatch(patchUuid),
      this._ejectToolsIsos(),
    ])

    await this.call('pool_patch.pool_apply', patch.$ref)
  },

  // platform_version >= 2.1.1
  _installPatchUpdateOnAllHosts: deferrable(async function($defer, patchUuid) {
    await this._assertConsistentHostServerTime(this.pool.master)

    let [vdi] = await Promise.all([
      this._getUpdateVdi($defer, patchUuid),
      this._ejectToolsIsos(),
    ])
    if (vdi == null) {
      vdi = await this._getUpdateVdi($defer, patchUuid, this.pool.master)
    }

    return this.call(
      'pool_update.pool_apply',
      await this.call('pool_update.introduce', vdi.$ref)
    )
  }),

  async installPoolPatchOnAllHosts(patchUuid) {
    log.debug(`installing patch ${patchUuid} on all hosts`)

    return useUpdateSystem(this.pool.$master)
      ? this._installPatchUpdateOnAllHosts(patchUuid)
      : this._installPoolPatchOnAllHosts(patchUuid)
  },

  // -----------------------------------------------------------------

  // If no host is provided, install on pool
  async _installPoolPatchAndRequirements(patch, patchesByUuid, host) {
    if (
      host == null
        ? !this._isPoolPatchInstallableOnPool(patch.uuid)
        : !this._isPoolPatchInstallableOnHost(patch.uuid, host)
    ) {
        await this.call('pool_patch.pool_apply', patch.$ref)
      }
      return
    }
    // ----------

    const { requirements } = patch

    if (requirements.length) {
      for (const requirementUuid of requirements) {
        const requirement = patchesByUuid[requirementUuid]

        if (requirement != null) {
          await this._installPoolPatchAndRequirements(
            requirement,
            patchesByUuid,
            host
          )
          host = host && this.getObject(host.$id)
        }
    // for each patch: pool_update.introduce → pool_update.pool_apply
    for (const p of patches) {
      const [vdi] = await Promise.all([
        this._uploadPatch($defer, p.uuid),
        this._ejectToolsIsos(),
      ])
      if (vdi === undefined) {
        throw new Error('patch could not be uploaded')
      }
    }

    return host == null
      ? this.installPoolPatchOnAllHosts(patch.uuid)
      : this.installPoolPatchOnHost(patch.uuid, host)
  },

  async installSpecificPatchesOnHost(patchNames, hostId) {
    const host = this.getObject(hostId)
    const missingPatches = await this._listMissingPoolPatchesOnHost(host)

    const patchesToInstall = []
    const addPatchesToList = patches => {
      forEach(patches, patch => {
        addPatchesToList(mapToArray(patch.requirements, { uuid: patch.uuid }))

        if (!find(patchesToInstall, { name: patch.name })) {
          patchesToInstall.push(patch)
        }
      })
    }
    addPatchesToList(
      mapToArray(patchNames, name => find(missingPatches, { name }))
    )

    for (let i = 0, n = patchesToInstall.length; i < n; i++) {
      await this._installPoolPatchAndRequirements(
        patchesToInstall[i],
        missingPatches,
        host
      )
    }
  },

  async installAllPoolPatchesOnHost(hostId) {
    const host = this.getObject(hostId)
    if (host.software_version.product_brand === 'XCP-ng') {
      return this._xcpInstallHostUpdates(host)
    }
    return this._installAllPoolPatchesOnHost(host)
  },

  async _installAllPoolPatchesOnHost(host) {
    const installableByUuid =
      host.license_params.sku_type !== 'free'
        ? pickBy(await this._listMissingPoolPatchesOnHost(host), {
            upgrade: false,
          })
        : pickBy(await this._listMissingPoolPatchesOnHost(host), {
            paid: false,
            upgrade: false,
          })

    // List of all installable patches sorted from the newest to the
    // oldest.
    const installable = sortBy(
      installableByUuid,
      patch => -Date.parse(patch.date)
    )

    for (let i = 0, n = installable.length; i < n; ++i) {
      const patch = installable[i]

      if (this._isPoolPatchInstallableOnHost(patch.uuid, host)) {
        await this._installPoolPatchAndRequirements(
          patch,
          installableByUuid,
          host
        ).catch(error => {
          if (
            error.code !== 'PATCH_ALREADY_APPLIED' &&
            error.code !== 'UPDATE_ALREADY_APPLIED'
          ) {
            throw error
          }
        })
        host = this.getObject(host.$id)
      }
    }
  },

  async installAllPoolPatchesOnAllHosts() {
    if (this.pool.$master.software_version.product_brand === 'XCP-ng') {
      return this._xcpInstallAllPoolUpdatesOnHost()
    }
    return this._installAllPoolPatchesOnAllHosts()
  },

  async _installAllPoolPatchesOnAllHosts() {
    const installableByUuid = assign(
      {},
      ...(await Promise.all(
        mapFilter(this.objects.all, host => {
          if (host.$type === 'host') {
            return this._listMissingPoolPatchesOnHost(host).then(patches =>
              host.license_params.sku_type !== 'free'
                ? pickBy(patches, { upgrade: false })
                : pickBy(patches, { paid: false, upgrade: false })
            )
          }
        })
      ))
    )

    // List of all installable patches sorted from the newest to the
    // oldest.
    const installable = sortBy(
      installableByUuid,
      patch => -Date.parse(patch.date)
    )

    for (let i = 0, n = installable.length; i < n; ++i) {
      const patch = installable[i]

      await this._installPoolPatchAndRequirements(
        patch,
        installableByUuid
      ).catch(error => {
        if (
          error.code !== 'PATCH_ALREADY_APPLIED' &&
          error.code !== 'UPDATE_ALREADY_APPLIED_IN_POOL'
        ) {
          throw error
        }
      })
    }
  },

  // ----------------------------------
  // XCP-ng dedicated zone for patching
  // ----------------------------------

  // list all yum updates available for a XCP-ng host
  async _xcpListHostUpdates(host) {
    return JSON.parse(
        log.debug(`installing patch ${p.uuid}`)
        await this.call(
          'host.call_plugin',
          host.$ref,
          'updater.py',
          'check_update',
          {}
          'pool_update.pool_apply',
          await this.call('pool_update.introduce', vdi.$ref)
        )
      )
  },

  // install all yum updates for a XCP-ng host
  async _xcpInstallHostUpdates(host) {
    const update = await this.call(
      'host.call_plugin',
      host.$ref,
      'updater.py',
      'update',
      {}
    )

    if (JSON.parse(update).exit !== 0) {
      throw new Error('Update install failed')
    } else {
      await this._updateObjectMapProperty(host, 'other_config', {
        rpm_patch_installation_time: String(Date.now() / 1000),
      })
    }
  }),

  async _hostInstall(patches, host) {
    throw new Error('single host install not implemented')
    // Legacy XS patches
    // for each patch: pool_patch.apply
    // ----------
    // for each patch: pool_update.introduce → pool_update.apply
  },

  // install all yum updates for all XCP-ng hosts in a given pool
  async _xcpInstallAllPoolUpdatesOnHost() {
    await asyncMap(filter(this.objects.all, { $type: 'host' }), host =>
      this._xcpInstallHostUpdates(host)
    )
  // high level
  // install specified patches on specified hosts
  //
  // no hosts specified: pool-wide install (only the pool master's installed patches will be considered)
  // no patches specified: install either the pool master's missing patches (no hosts specified) or each host's missing patches
  //
  // patches will be ignored for XCP (always updates completely)
  // patches that are already installed will be ignored (XS only)
  //
  // XS pool-wide optimization only works when no hosts are specified
  // it may install more patches than specified if some of them require other patches
  async installPatches({ patches, hosts }) {
    // XCP
    if (_isXcp(this.pool.$master)) {
      return this._xcpUpdate(hosts)
    }

    // XS
    // TODO: assert consistent time
    const poolWide = hosts === undefined
    if (poolWide) {
      log.debug('patches that were requested to be installed', patches)
      const installablePatches = await this._listInstallablePatches(
        this.pool.$master,
        patches
      )

      log.debug(
        'patches that will actually be installed',
        installablePatches.map(patch => patch.uuid)
      )

      return this._poolWideInstall(installablePatches)
    }

    // for each host
    // get installable patches
    // filter patches that should be installed
    // sort patches
    // host-by-host install
    throw new Error('non pool-wide install not implemented')
  },
}
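
A hedged usage sketch of the new high-level entry point above. On XCP-ng the `patches` argument is ignored and a full yum update runs via updater.py; on XenServer, omitting both fields installs every missing patch pool-wide. `xapi` is assumed to be a connected instance of the Xapi class, and the UUID shown is a placeholder:

async function updatePool(xapi) {
  // pool-wide, all missing patches (XS) or full yum update (XCP-ng)
  await xapi.installPatches({})

  // XS only: a specific selection, pool-wide; requirements are added
  // automatically by _listInstallablePatches
  await xapi.installPatches({
    patches: ['22b6f4b2-…'], // patch UUIDs from listMissingPatches
  })
}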
@@ -1,14 +1,25 @@
import { cancelable } from 'promise-toolbox'

const PATH_DB_DUMP = '/pool/xmldbdump'

export default {
  @cancelable
  exportPoolMetadata($cancelToken) {
    const { pool } = this
    return this.getResource($cancelToken, '/pool/xmldbdump', {
      task: this.createTask(
        'Pool metadata',
        pool.name_label ?? pool.$master.name_label
      ),
    return this.getResource($cancelToken, PATH_DB_DUMP, {
      task: this.createTask('Export pool metadata'),
    })
  },

  // Restore the XAPI database from an XML backup
  //
  // See https://github.com/xapi-project/xen-api/blob/405b02e72f1ccc4f4b456fd52db30876faddcdd8/ocaml/xapi/pool_db_backup.ml#L170-L205
  @cancelable
  importPoolMetadata($cancelToken, stream, force = false) {
    return this.putResource($cancelToken, stream, PATH_DB_DUMP, {
      query: {
        dry_run: String(!force),
      },
      task: this.createTask('Import pool metadata'),
    })
  },
}
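
The export streams the XAPI XML database dump, and the import replays it; note that `force = false` maps to `dry_run=true`, so by default the restore is only validated. A rough round-trip sketch under those assumptions (`xapi` is a connected Xapi instance; in practice the export stream is usually written to a remote first):

async function backupAndCheckPoolMetadata(xapi) {
  const stream = await xapi.exportPoolMetadata()

  // force = false → dry_run=true: the XAPI only validates the dump
  await xapi.importPoolMetadata(stream, false)
}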

@@ -1,28 +1,27 @@
import createLogger from '@xen-orchestra/log'
import ms from 'ms'
import { noSuchObject } from 'xo-common/api-errors'
import { ignoreErrors } from 'promise-toolbox'
import { invalidCredentials, noSuchObject } from 'xo-common/api-errors'

import parseDuration from '../_parseDuration'
import Token, { Tokens } from '../models/token'
import { forEach, generateToken } from '../utils'

// ===================================================================

const log = createLogger('xo:authentification')

const noSuchAuthenticationToken = id => noSuchObject(id, 'authenticationToken')

const ONE_MONTH = 1e3 * 60 * 60 * 24 * 30

export default class {
  constructor(xo) {
  constructor(xo, config) {
    this._config = config.authentication
    this._providers = new Set()
    this._xo = xo

    // Store last failures by user to throttle tries (slow brute-force
    // attacks).
    this._failures = { __proto__: null }

    this._providers = new Set()

    // Creates persistent collections.
    const tokensDb = (this._tokens = new Tokens({
      connection: xo._redis,
@@ -38,7 +37,7 @@ export default class {

    const user = await xo.getUserByName(username, true)
    if (user && (await xo.checkUserPassword(user.id, password))) {
      return user.id
      return { userId: user.id }
    }
  })

@@ -49,7 +48,8 @@ export default class {
    }

    try {
      return (await xo.getAuthenticationToken(tokenId)).user_id
      const token = await xo.getAuthenticationToken(tokenId)
      return { expiration: token.expiration, userId: token.user_id }
    } catch (error) {}
  })

@@ -87,32 +87,47 @@ export default class {
    for (const provider of this._providers) {
      try {
        // A provider can return:
        // - `null` if the user could not be authenticated
        // - `undefined`/`null` if the user could not be authenticated
        // - the identifier of the authenticated user
        // - an object containing:
        //   - `userId`
        //   - optionally `expiration` to indicate when the session is no longer
        //     valid
        // - an object with a property `username` containing the name
        //   of the authenticated user
        const result = await provider(credentials)

        // No match.
        if (!result) {
        if (result == null) {
          continue
        }

        return result.username
          ? await this._xo.registerUser(undefined, result.username)
          : await this._xo.getUser(result)
        if (typeof result === 'string') {
          return {
            user: await this._getUser(result),
          }
        }

        const { userId, username, expiration } = result

        return {
          user: await (userId !== undefined
            ? this._xo.getUser(userId)
            : this._xo.registerUser(undefined, username)),
          expiration,
        }
      } catch (error) {
        // DEPRECATED: Authentication providers may just throw `null`
        // to indicate they could not authenticate the user without
        // any special errors.
        if (error) log.error(error)
        if (error !== null) log.error(error)
      }
    }

    return false
  }
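
A hedged sketch of a provider under the extended contract above: it may resolve to undefined (credentials it does not handle), null (no match), a user id string, `{ username }`, or `{ userId, expiration }` to bound the session lifetime. `verifyApiKey` is a hypothetical lookup, not part of xo-server:

const verifyApiKey = async key =>
  key === 'demo-key' ? { userId: 'user-1' } : null

const apiKeyProvider = async ({ apiKey }) => {
  if (apiKey === undefined) {
    return // not our kind of credentials: let other providers try
  }
  const entry = await verifyApiKey(apiKey)
  if (entry === null) {
    return null // explicitly no match
  }
  return {
    userId: entry.userId,
    expiration: Date.now() + 60 * 60 * 1000, // session valid for one hour
  }
}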

  async authenticateUser(credentials) {
  async authenticateUser(
    credentials
  ): Promise<{| user: Object, expiration?: number |}> {
    // don't even attempt to authenticate with an empty password
    const { password } = credentials
    if (password === '') {
@@ -139,25 +154,31 @@ export default class {
      throw new Error('too fast authentication tries')
    }

    const user = await this._authenticateUser(credentials)
    if (user) {
      delete failures[username]
    } else {
    const result = await this._authenticateUser(credentials)
    if (result === undefined) {
      failures[username] = now
      throw invalidCredentials()
    }

    return user
    delete failures[username]
    return result
  }

  // -----------------------------------------------------------------

  async createAuthenticationToken({ expiresIn = ONE_MONTH, userId }) {
  async createAuthenticationToken({
    expiresIn = this._config.defaultTokenValidity,
    userId,
  }) {
    const token = new Token({
      id: await generateToken(),
      user_id: userId,
      expiration:
        Date.now() +
        (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
        Math.min(
          parseDuration(expiresIn),
          parseDuration(this._config.maxTokenValidity)
        ),
    })

    await this._tokens.add(token)
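
The new expiration clamps the requested validity to `maxTokenValidity`. A worked sketch of that rule, with a local `parseDuration` standing in for xo-server's `../_parseDuration` (assumed here to wrap the `ms` package, accepting either a duration string or raw milliseconds):

import ms from 'ms'

const parseDuration = value => (typeof value === 'string' ? ms(value) : value)

const config = { defaultTokenValidity: '30 days', maxTokenValidity: '0.5 year' }

const expiresIn = '2 years' // requested by the client
const validity = Math.min(
  parseDuration(expiresIn), // ≈ 63.1e9 ms
  parseDuration(config.maxTokenValidity) // ≈ 15.8e9 ms → wins
)
const expiration = Date.now() + validity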

@@ -53,14 +53,27 @@ const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
    return 1
  }

// type Task = {
//   data: any,
//   end?: number,
//   id: string,
//   jobId?: string,
//   jobName?: string,
//   message?: 'backup' | 'metadataRestore' | 'restore',
//   scheduleId?: string,
//   start: number,
//   status: 'pending' | 'failure' | 'interrupted' | 'skipped' | 'success',
//   tasks?: Task[],
// }
export default {
  async getBackupNgLogs(runId?: string) {
    const [jobLogs, restoreLogs] = await Promise.all([
    const [jobLogs, restoreLogs, restoreMetadataLogs] = await Promise.all([
      this.getLogs('jobs'),
      this.getLogs('restore'),
      this.getLogs('metadataRestore'),
    ])

    const { runningJobs, runningRestores } = this
    const { runningJobs, runningRestores, runningMetadataRestores } = this
    const consolidated = {}
    const started = {}

@@ -77,6 +90,7 @@ export default {
          id,
          jobId,
          jobName: data.jobName,
          message: 'backup',
          scheduleId,
          start: time,
          status: runningJobs[jobId] === id ? 'pending' : 'interrupted',
@@ -105,7 +119,8 @@ export default {
      if (parentId === undefined && (runId === undefined || runId === id)) {
        // top level task
        task.status =
          message === 'restore' && !runningRestores.has(id)
          (message === 'restore' && !runningRestores.has(id)) ||
          (message === 'metadataRestore' && !runningMetadataRestores.has(id))
            ? 'interrupted'
            : 'pending'
        consolidated[id] = started[id] = task
@@ -172,6 +187,7 @@ export default {

    forEach(jobLogs, handleLog)
    forEach(restoreLogs, handleLog)
    forEach(restoreMetadataLogs, handleLog)

    return runId === undefined ? consolidated : consolidated[runId]
  },

@@ -140,6 +140,7 @@ const defaultSettings: Settings = {
  concurrency: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  offlineSnapshot: false,
  reportWhen: 'failure',
  snapshotRetention: 0,
@@ -475,10 +476,11 @@ const disableVmHighAvailability = async (xapi: Xapi, vm: Vm) => {
//
// - `other_config`:
//    - `xo:backup:datetime` = snapshot.snapshot_time (allows sorting replicated VMs)
//    - `xo:backup:deltaChainLength` = n (number of delta copies/replications since a full)
//    - `xo:backup:exported` = 'true' (added at the end of the backup)
//    - `xo:backup:job` = job.id
//    - `xo:backup:schedule` = schedule.id
//    - `xo:backup:vm` = vm.uuid
//    - `xo:backup:exported` = 'true' (added at the end of the backup)
//
// Attributes of created VMs:
//
@@ -937,6 +939,7 @@ export default class BackupNg {
          },
          xapi._updateObjectMapProperty(vm, 'other_config', {
            'xo:backup:datetime': null,
            'xo:backup:deltaChainLength': null,
            'xo:backup:exported': null,
            'xo:backup:job': null,
            'xo:backup:schedule': null,
@@ -1293,12 +1296,31 @@ export default class BackupNg {
      $defer.onSuccess.call(xapi, 'deleteVm', baseSnapshot)
    }

    let deltaChainLength = 0
    let fullVdisRequired
    await (async () => {
      if (baseSnapshot === undefined) {
        return
      }

      let prevDeltaChainLength = +baseSnapshot.other_config[
        'xo:backup:deltaChainLength'
      ]
      if (Number.isNaN(prevDeltaChainLength)) {
        prevDeltaChainLength = 0
      }
      deltaChainLength = prevDeltaChainLength + 1

      const fullInterval = getSetting(settings, 'fullInterval', [
        vmUuid,
        scheduleId,
        '',
      ])
      if (fullInterval !== 0 && fullInterval <= deltaChainLength) {
        baseSnapshot = undefined
        return
      }

      const fullRequired = { __proto__: null }
      const vdis: $Dict<Vdi> = getVmDisks(baseSnapshot)
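
The logic above tracks the delta chain length in the base snapshot's `other_config` and, once it reaches the `fullInterval` setting, drops the base to force a full export. A minimal sketch of just that decision rule, mirroring the code shown:

function shouldForceFull(baseSnapshot, fullInterval) {
  if (baseSnapshot === undefined) {
    return true // no base at all: necessarily a full
  }
  let prev = +baseSnapshot.other_config['xo:backup:deltaChainLength']
  if (Number.isNaN(prev)) {
    prev = 0 // tag absent: treat as a fresh chain
  }
  const deltaChainLength = prev + 1
  // fullInterval === 0 means "never force a full"
  return fullInterval !== 0 && fullInterval <= deltaChainLength
}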

@@ -1626,6 +1648,15 @@ export default class BackupNg {
          ],
          noop // errors are handled in logs
        )

        if (!isFull) {
          ignoreErrors.call(
            snapshot.update_other_config(
              'xo:backup:deltaChainLength',
              String(deltaChainLength)
            )
          )
        }
      } else {
        throw new Error(`no exporter for backup mode ${mode}`)
      }

@@ -1,11 +1,15 @@
// @flow
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import { fromEvent, ignoreErrors } from 'promise-toolbox'

import debounceWithKey from '../_pDebounceWithKey'
import parseDuration from '../_parseDuration'
import { type Xapi } from '../xapi'
import {
  safeDateFormat,
  serializeError,
  type SimpleIdPattern,
  unboxIdsFromPattern,
} from '../utils'
@@ -13,8 +17,14 @@ import {
import { type Executor, type Job } from './jobs'
import { type Schedule } from './scheduling'

const log = createLogger('xo:xo-mixins:metadata-backups')

const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
const METADATA_BACKUP_JOB_TYPE = 'metadataBackup'

const compareTimestamp = (a, b) => a.timestamp - b.timestamp

type Settings = {|
  retentionXoMetadata?: number,
  retentionPoolMetadata?: number,
@@ -29,6 +39,26 @@ type MetadataBackupJob = {
  xoMetadata?: boolean,
}

const createSafeReaddir = (handler, methodName) => (path, options) =>
  handler.list(path, options).catch(error => {
    if (error?.code !== 'ENOENT') {
      log.warn(`${methodName} ${path}`, { error })
    }
    return []
  })

// metadata.json
//
// {
//   jobId: String,
//   jobName: String,
//   scheduleId: String,
//   scheduleName: String,
//   timestamp: number,
//   pool?: <Pool />
//   poolMaster?: <Host />
// }
//
// File structure on remotes:
//
// <remote>
@@ -43,7 +73,6 @@ type MetadataBackupJob = {
//       └─ <YYYYMMDD>T<HHmmss>
//          ├─ metadata.json
//          └─ data

export default class metadataBackup {
  _app: {
    createJob: (
@@ -63,9 +92,30 @@ export default class metadataBackup {
    removeJob: (id: string) => Promise<void>,
  }

  constructor(app: any) {
  get runningMetadataRestores() {
    return this._runningMetadataRestores
  }

  constructor(app: any, { backup }) {
    this._app = app
    app.on('start', () => {
    this._logger = undefined
    this._runningMetadataRestores = new Set()

    const debounceDelay = parseDuration(backup.listingDebounce)
    this._listXoMetadataBackups = debounceWithKey(
      this._listXoMetadataBackups,
      debounceDelay,
      remoteId => remoteId
    )
    this.__listPoolMetadataBackups = debounceWithKey(
      this._listPoolMetadataBackups,
      debounceDelay,
      remoteId => remoteId
    )
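
`debounceWithKey(fn, delay, keyFn)` caches one result per key, here per remote, so repeated listings within the delay reuse the previous result. A hedged sketch of the semantics assumed above; the real `../_pDebounceWithKey` may differ in details such as cache invalidation:

function debounceWithKey(fn, delay, keyFn) {
  const cache = new Map()
  return function(...args) {
    const key = keyFn.apply(this, args)
    const entry = cache.get(key)
    const now = Date.now()
    if (entry !== undefined && now - entry.timestamp < delay) {
      return entry.promise // fresh enough: reuse the cached promise
    }
    const promise = Promise.resolve(fn.apply(this, args))
    cache.set(key, { promise, timestamp: now })
    return promise
  }
}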
|
||||
|
||||
app.on('start', async () => {
|
||||
this._logger = await app.getLogger('metadataRestore')
|
||||
|
||||
app.registerJobExecutor(
|
||||
METADATA_BACKUP_JOB_TYPE,
|
||||
this._executor.bind(this)
|
||||
@@ -106,7 +156,7 @@ export default class metadataBackup {
|
||||
|
||||
const files = []
|
||||
if (job.xoMetadata && retentionXoMetadata > 0) {
|
||||
const xoMetadataDir = `xo-config-backups/${schedule.id}`
|
||||
const xoMetadataDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
|
||||
const dir = `${xoMetadataDir}/${formattedTimestamp}`
|
||||
|
||||
const data = JSON.stringify(await app.exportConfig(), null, 2)
|
||||
@@ -131,7 +181,7 @@ export default class metadataBackup {
|
||||
files.push(
|
||||
...(await Promise.all(
|
||||
poolIds.map(async id => {
|
||||
const poolMetadataDir = `xo-pool-metadata-backups/${
|
||||
const poolMetadataDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${
|
||||
schedule.id
|
||||
}/${id}`
|
||||
const dir = `${poolMetadataDir}/${formattedTimestamp}`
|
||||
@@ -261,4 +311,210 @@ export default class metadataBackup {
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
// xoBackups
|
||||
// [{
|
||||
// id: `${remoteId}/folderPath`,
|
||||
// jobId,
|
||||
// jobName,
|
||||
// scheduleId,
|
||||
// scheduleName,
|
||||
// timestamp
|
||||
// }]
|
||||
async _listXoMetadataBackups(remoteId, handler) {
|
||||
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
|
||||
|
||||
const backups = []
|
||||
await asyncMap(
|
||||
safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }),
|
||||
scheduleDir =>
|
||||
asyncMap(
|
||||
safeReaddir(scheduleDir, { prependDir: true }),
|
||||
async backupDir => {
|
||||
try {
|
||||
backups.push({
|
||||
id: `${remoteId}${backupDir}`,
|
||||
...JSON.parse(
|
||||
String(await handler.readFile(`${backupDir}/metadata.json`))
|
||||
),
|
||||
})
|
||||
} catch (error) {
|
||||
log.warn(`listXoMetadataBackups ${backupDir}`, { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
  // poolBackups
  // {
  //   [<Pool ID>]: [{
  //     id: `${remoteId}/folderPath`,
  //     jobId,
  //     jobName,
  //     scheduleId,
  //     scheduleName,
  //     timestamp,
  //     pool,
  //     poolMaster,
  //   }]
  // }
  async _listPoolMetadataBackups(remoteId, handler) {
    const safeReaddir = createSafeReaddir(handler, 'listPoolMetadataBackups')

    const backupsByPool = {}
    await asyncMap(
      safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }),
      scheduleDir =>
        asyncMap(safeReaddir(scheduleDir), poolId => {
          const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
          return asyncMap(
            safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }),
            async backupDir => {
              try {
                backups.push({
                  id: `${remoteId}${backupDir}`,
                  ...JSON.parse(
                    String(await handler.readFile(`${backupDir}/metadata.json`))
                  ),
                })
              } catch (error) {
                log.warn(`listPoolMetadataBackups ${backupDir}`, {
                  error,
                })
              }
            }
          )
        })
    )

    // delete empty entries and sort backups
    Object.keys(backupsByPool).forEach(poolId => {
      const backups = backupsByPool[poolId]
      if (backups.length === 0) {
        delete backupsByPool[poolId]
      } else {
        backups.sort(compareTimestamp)
      }
    })

    return backupsByPool
  }

  // {
  //   xo: {
  //     [remote ID]: xoBackups
  //   },
  //   pool: {
  //     [remote ID]: poolBackups
  //   }
  // }
  async listMetadataBackups(remoteIds: string[]) {
    const app = this._app

    const xo = {}
    const pool = {}
    await Promise.all(
      remoteIds.map(async remoteId => {
        try {
          const handler = await app.getRemoteHandler(remoteId)

          const [xoList, poolList] = await Promise.all([
            this._listXoMetadataBackups(remoteId, handler),
            this._listPoolMetadataBackups(remoteId, handler),
          ])
          if (xoList.length !== 0) {
            xo[remoteId] = xoList
          }
          if (Object.keys(poolList).length !== 0) {
            pool[remoteId] = poolList
          }
        } catch (error) {
          log.warn(`listMetadataBackups for remote ${remoteId}`, { error })
        }
      })
    )

    return {
      xo,
      pool,
    }
  }

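A hypothetical call and result, to make the grouping documented above concrete (remote names, schedule and pool ids are invented):

// await xo.listMetadataBackups(['remote1', 'remote2'])
// returns:
// {
//   xo: {
//     remote1: [
//       { id: 'remote1/xo-config-backups/schedule1/20190324T120000Z',
//         jobId, jobName, scheduleId, scheduleName, timestamp },
//     ],
//   },
//   pool: {
//     remote2: {
//       pool1: [
//         { id: 'remote2/xo-pool-metadata-backups/schedule1/pool1/20190324T120000Z',
//           pool, poolMaster, timestamp },
//       ],
//     },
//   },
// }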
  // Task logs emitted in a restore execution:
  //
  // task.start(message: 'restore', data: <Metadata />)
  // └─ task.end
  async restoreMetadataBackup(id: string) {
    const app = this._app
    const logger = this._logger
    const message = 'metadataRestore'
    const [remoteId, dir, ...path] = id.split('/')
    const handler = await app.getRemoteHandler(remoteId)
    const metadataFolder = `${dir}/${path.join('/')}`

    const taskId = logger.notice(message, {
      event: 'task.start',
      data: JSON.parse(
        String(await handler.readFile(`${metadataFolder}/metadata.json`))
      ),
    })
    try {
      this._runningMetadataRestores.add(taskId)

      let result
      if (dir === DIR_XO_CONFIG_BACKUPS) {
        result = await app.importConfig(
          JSON.parse(
            String(await handler.readFile(`${metadataFolder}/data.json`))
          )
        )
      } else {
        result = await app
          .getXapi(path[1])
          .importPoolMetadata(
            await handler.createReadStream(`${metadataFolder}/data`),
            true
          )
      }

      logger.notice(message, {
        event: 'task.end',
        result,
        status: 'success',
        taskId,
      })
    } catch (error) {
      logger.error(message, {
        event: 'task.end',
        result: serializeError(error),
        status: 'failure',
        taskId,
      })
      throw error
    } finally {
      this._runningMetadataRestores.delete(taskId)
    }
  }

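To make the destructuring above concrete, here is how a pool metadata backup id decomposes (segment values are invented):

// 'remote1/xo-pool-metadata-backups/schedule1/pool1/20190324T120000Z'.split('/')
// gives remoteId = 'remote1', dir = 'xo-pool-metadata-backups',
// path = ['schedule1', 'pool1', '20190324T120000Z'];
// path[1] is therefore the pool whose XAPI connection receives importPoolMetadata().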
  async deleteMetadataBackup(id: string) {
    const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
    const metadataDirReg = 'xo-(config|pool-metadata)-backups'
    const timestampReg = '\\d{8}T\\d{6}Z'

    const regexp = new RegExp(
      `^/?${uuidReg}/${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`
    )

    if (!regexp.test(id)) {
      throw new Error(`The id (${id}) does not correspond to a metadata folder`)
    }
    const app = this._app
    const [remoteId, ...path] = id.split('/')

    const handler = await app.getRemoteHandler(remoteId)
    return handler.rmtree(path.join('/'))
  }
}

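The validation regexp accepts exactly the two id shapes produced by the listings above (UUID and timestamp placeholders, values invented):

// <remote UUID>/xo-config-backups/<schedule UUID>/20190324T120000Z
// <remote UUID>/xo-pool-metadata-backups/<schedule UUID>/<pool UUID>/20190324T120000Z
// The optional (/${uuidReg})? group is what admits the extra pool UUID segment.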
18 packages/xo-server/src/xo-mixins/patches.js Normal file
@@ -0,0 +1,18 @@
import { differenceBy } from 'lodash'

export default class {
  constructor(xo) {
    this._xo = xo
  }

  getPatchesDifference(hostA, hostB) {
    const patchesA = this._xo
      .getObject(hostA)
      .patches.map(patchId => this._xo.getObject(patchId))
    const patchesB = this._xo
      .getObject(hostB)
      .patches.map(patchId => this._xo.getObject(patchId))

    return differenceBy(patchesA, patchesB, 'name').map(patch => patch.name)
  }
}
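lodash's differenceBy compares elements by the given iteratee, so this returns the patches present on hostA but missing on hostB, keyed by name; for instance:

import { differenceBy } from 'lodash'

differenceBy(
  [{ name: 'XS71E001' }, { name: 'XS71E002' }], // hostA patches (invented)
  [{ name: 'XS71E002' }], // hostB patches (invented)
  'name'
) // [{ name: 'XS71E001' }], mapped above to ['XS71E001']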
@@ -166,7 +166,7 @@ export default class {
  // save the new configuration.
  async configurePlugin(id, configuration) {
    const plugin = this._getRawPlugin(id)
    const metadata = await this._getPluginMetadata()
    const metadata = await this._getPluginMetadata(id)

    if (metadata !== undefined) {
      configuration = sensitiveValues.merge(

@@ -68,22 +68,40 @@ export default class {
    let handler = handlers[id]
    if (handler === undefined) {
      handler = handlers[id] = getHandler(remote, this._remoteOptions)
    }

    try {
      await handler.sync()
      ignoreErrors.call(this._updateRemote(id, { error: '' }))
    } catch (error) {
      ignoreErrors.call(this._updateRemote(id, { error: error.message }))
      throw error
      try {
        await handler.sync()
        ignoreErrors.call(this._updateRemote(id, { error: '' }))
      } catch (error) {
        ignoreErrors.call(this._updateRemote(id, { error: error.message }))
        throw error
      }
    }

    return handler
  }

  async testRemote(remote) {
    const handler = await this.getRemoteHandler(remote)
    return handler.test()
  async testRemote(remoteId) {
    const handler = await this.getRemoteHandler(remoteId)
    const { readRate, writeRate, ...answer } = await handler.test()

    if (answer.success) {
      const benchmark = {
        readRate,
        timestamp: Date.now(),
        writeRate,
      }
      const remote = await this._getRemote(remoteId)

      await this._updateRemote(remoteId, {
        benchmarks:
          remote.benchmarks !== undefined
            ? [...remote.benchmarks.slice(-49), benchmark] // store 50 benchmarks
            : [benchmark],
      })
    }

    return answer
  }

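The slice(-49) plus append caps the stored history at 50 entries, dropping the oldest first; the same pattern in isolation:

// Bounded history: with a full list of 50, slice(-49) keeps the 49 most
// recent entries, so appending the new one brings the total back to 50.
const appendCapped = (history = [], entry, max = 50) => [
  ...history.slice(-(max - 1)),
  entry,
]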
  async getAllRemotesInfo() {
@@ -150,7 +168,7 @@ export default class {
  }

  @synchronized()
  async _updateRemote(id, { url, ...props }) {
  async _updateRemote(id, { benchmarks, url, ...props }) {
    const remote = await this._getRemote(id)

    // url is handled separately to take care of obfuscated values
@@ -158,6 +176,13 @@ export default class {
      remote.url = format(sensitiveValues.merge(parse(url), parse(remote.url)))
    }

    if (
      benchmarks !== undefined ||
      (benchmarks = remote.benchmarks) !== undefined
    ) {
      remote.benchmarks = JSON.stringify(benchmarks)
    }

    patch(remote, props)

    return (await this._remotes.update(remote)).properties

@@ -51,8 +51,8 @@ const levelPromise = db => {
// ===================================================================

export default class {
  constructor(xo) {
    const dir = `${xo._config.datadir}/leveldb`
  constructor(xo, config) {
    const dir = `${config.datadir}/leveldb`
    this._db = ensureDir(dir).then(() => {
      return sublevel(
        levelup(dir, {

@@ -1,5 +1,6 @@
// @flow

import defer from 'golike-defer'
import { type Remote, getHandler } from '@xen-orchestra/fs'
import { mergeVhd as mergeVhd_ } from 'vhd-lib'

@@ -12,7 +13,8 @@ global.Promise = require('bluebird')
// $FlowFixMe
const config: Object = JSON.parse(process.env.XO_CONFIG)

export async function mergeVhd(
export const mergeVhd = defer(async function(
  $defer: any,
  parentRemote: Remote,
  parentPath: string,
  childRemote: Remote,
@@ -21,9 +23,11 @@ export async function mergeVhd(
  const parentHandler = getHandler(parentRemote, config.remoteOptions)
  const childHandler = getHandler(childRemote, config.remoteOptions)

  // DO NOT forget the handlers as they are still in use in the main process
  await parentHandler.sync()
  $defer.call(parentHandler, 'forget')

  await childHandler.sync()
  $defer.call(childHandler, 'forget')

  return mergeVhd_(parentHandler, parentPath, childHandler, childPath)
}
})

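golike-defer runs deferred calls when the wrapped function settles, whether it returns or throws, which is what guarantees the cleanup above; a small illustrative sketch with a hypothetical resource:

import defer from 'golike-defer'

// `resource` and its open/close/read methods are hypothetical.
const readAll = defer(async function($defer, resource) {
  await resource.open()
  $defer.call(resource, 'close') // resource.close() runs on return AND on throw

  return resource.read()
})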
@@ -40,7 +40,7 @@ const log = createLogger('xo:xo-mixins:xen-servers')
// - _xapis[server.id] is defined
// - _serverIdsByPool[xapi.pool.$id] is server.id
export default class {
  constructor(xo, { xapiOptions }) {
  constructor(xo, { guessVhdSizeOnImport, xapiOptions }) {
    this._objectConflicts = { __proto__: null } // TODO: clean when a server is disconnected.
    const serversDb = (this._servers = new Servers({
      connection: xo._redis,
@@ -49,7 +49,10 @@ export default class {
    }))
    this._serverIdsByPool = { __proto__: null }
    this._stats = new XapiStats()
    this._xapiOptions = xapiOptions
    this._xapiOptions = {
      guessVhdSizeOnImport,
      ...xapiOptions,
    }
    this._xapis = { __proto__: null }
    this._xo = xo

@@ -456,8 +459,8 @@ export default class {
    const xapis = this._xapis
    forEach(servers, server => {
      server.status = this._getXenServerStatus(server.id)
      if (server.status === 'connected' && server.label === undefined) {
        server.label = xapis[server.id].pool.name_label
      if (server.status === 'connected') {
        server.poolId = xapis[server.id].pool.uuid
      }

      // Do not expose password.

@@ -25,11 +25,11 @@
  },
  "dependencies": {
    "child-process-promise": "^2.0.3",
    "core-js": "3.0.0-beta.3",
    "core-js": "3.0.0",
    "pipette": "^0.9.3",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "tmp": "^0.0.33",
    "vhd-lib": "^0.5.1"
    "vhd-lib": "^0.6.0"
  },
  "devDependencies": {
    "@babel/cli": "^7.0.0",

@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "xo-web",
  "version": "5.37.0",
  "version": "5.38.0",
  "license": "AGPL-3.0",
  "description": "Web interface client for Xen-Orchestra",
  "keywords": [
@@ -96,7 +96,7 @@
    "moment-timezone": "^0.5.14",
    "notifyjs": "^3.0.0",
    "otplib": "^10.0.1",
    "promise-toolbox": "^0.11.0",
    "promise-toolbox": "^0.12.1",
    "prop-types": "^15.6.0",
    "qrcode": "^1.3.2",
    "random-password": "^0.1.2",

@@ -15,11 +15,7 @@ import {
  createFilter,
  createSelector,
} from './selectors'
import {
  installAllHostPatches,
  installAllPatchesOnPool,
  subscribeHostMissingPatches,
} from './xo'
import { installAllPatchesOnPool, subscribeHostMissingPatches } from './xo'

// ===================================================================

@@ -43,17 +39,6 @@ const MISSING_PATCHES_COLUMNS = [
    ),
    sortCriteria: (host, { missingPatches }) => missingPatches[host.id],
  },
  {
    name: _('patchUpdateButton'),
    itemRenderer: (host, { installAllHostPatches }) => (
      <ActionButton
        btnStyle='primary'
        handler={installAllHostPatches}
        handlerParam={host}
        icon='host-patch-update'
      />
    ),
  },
]

const POOLS_MISSING_PATCHES_COLUMNS = [
@@ -115,7 +100,9 @@ class HostsPatchesTable extends Component {
      pools[host.$pool] = true
    })

    return Promise.all(map(keys(pools), installAllPatchesOnPool))
    return Promise.all(
      map(keys(pools), pool => installAllPatchesOnPool({ pool }))
    )
  }

  componentDidMount() {
@@ -162,7 +149,6 @@ class HostsPatchesTable extends Component {
          : MISSING_PATCHES_COLUMNS
      }
      userData={{
        installAllHostPatches,
        missingPatches: this.state.missingPatches,
        pools,
      }}

@@ -2643,9 +2643,9 @@ export default {
  // Original text: "New"
  resourceSetNew: 'Nouvelle',

  // Original text: "Try dropping some VMs files here, or click to select VMs to upload. Accept only .xva/.ova files."
  // Original text: "Drop OVA or XVA files here to import Virtual Machines."
  importVmsList:
    'Essayez de déposer des fichiers de VMs ici, ou bien cliquez pour sélectionner des VMs à téléverser. Seuls les fichiers .xva/.ova sont acceptés.',
    'Déposez ici vos fichiers OVA ou XVA pour importer des machines virtuelles.',

  // Original text: "No selected VMs."
  noSelectedVms: 'Pas de VM sélectionnée.',

@@ -39,6 +39,13 @@ const messages = {
  hasInactivePath: 'Has an inactive path',
  pools: 'Pools',
  remotes: 'Remotes',
  type: 'Type',
  restore: 'Restore',
  delete: 'Delete',
  vms: 'VMs',
  metadata: 'Metadata',
  chooseBackup: 'Choose a backup',
  clickToShowError: 'Click to show error',

  // ----- Modals -----
  alertOk: 'OK',
@@ -443,6 +450,7 @@ const messages = {
  offlineSnapshotInfo: 'Shutdown VMs before snapshotting them',
  timeout: 'Timeout',
  timeoutInfo: 'Number of hours after which a job is considered failed',
  fullBackupInterval: 'Full backup interval',
  timeoutUnit: 'in hours',
  dbAndDrRequireEnterprisePlan: 'Delta Backup and DR require Enterprise plan',
  crRequiresPremiumPlan: 'CR requires Premium plan',
@@ -517,6 +525,8 @@ const messages = {
  remoteState: 'State',
  remoteDevice: 'Device',
  remoteDisk: 'Disk (Used / Total)',
  remoteSpeed: 'Speed (Write / Read)',
  remoteSpeedInfo: 'Read and write rates measured during the latest test',
  remoteOptions: 'Options',
  remoteShare: 'Share',
  remoteAction: 'Action',
@@ -711,6 +721,14 @@ const messages = {
  displayAllHosts: 'Display all hosts of this pool',
  displayAllStorages: 'Display all storages of this pool',
  displayAllVMs: 'Display all VMs of this pool',
  licenseRestrictions: 'License restrictions',
  licenseRestrictionsModalTitle:
    'Warning: you are using a Free XenServer license',
  actionsRestricted: 'Some actions will be restricted.',
  counterRestrictionsOptions: 'You can:',
  counterRestrictionsOptionsXcp:
    'upgrade to XCP-ng for free to get rid of these restrictions',
  counterRestrictionsOptionsXsLicense: 'or get a commercial Citrix license',
  // ----- Pool tabs -----
  hostsTabName: 'Hosts',
  vmsTabName: 'Vms',
@@ -754,10 +772,12 @@ const messages = {
  addSrLabel: 'Add SR',
  addVmLabel: 'Add VM',
  addHostLabel: 'Add Host',
  hostNeedsPatchUpdate:
    'This host needs to install {patches, number} patch{patches, plural, one {} other {es}} before it can be added to the pool. This operation may be long.',
  hostNeedsPatchUpdateNoInstall:
    "This host cannot be added to the pool because it's missing some patches.",
  missingPatchesPool:
    'The pool needs to install {nMissingPatches, number} patch{nMissingPatches, plural, one {} other {es}}. This operation may be long.',
  missingPatchesHost:
    'This host needs to install {nMissingPatches, number} patch{nMissingPatches, plural, one {} other {es}}. This operation may be long.',
  patchUpdateNoInstall:
    'This host cannot be added to the pool because the patches are not homogeneous.',
  addHostErrorTitle: 'Adding host failed',
  addHostNotHomogeneousErrorMessage: 'Host patches could not be homogenized.',
  disconnectServer: 'Disconnect',
@@ -884,14 +904,14 @@ const messages = {
  hostAppliedPatches: 'Applied patches',
  hostMissingPatches: 'Missing patches',
  hostUpToDate: 'Host up-to-date!',
  installPatchWarningTitle: 'Non-recommended patch install',
  installPatchWarningContent:
    'This will install a patch only on this host. This is NOT the recommended way: please go into the Pool patch view and follow instructions there. If you are sure about this, you can continue anyway',
  installPatchWarningReject: 'Go to pool',
  installPatchWarningResolve: 'Install',
  installAllPatchesTitle: 'Install all patches',
  installAllPatchesContent: 'To install all patches go to pool.',
  installAllPatchesRedirect: 'Go to pool',
  installAllPatchesOnHostContent:
    'Are you sure you want to install all patches on this host?',
  patchRelease: 'Release',
  updatePluginNotInstalled:
    'Update plugin is not installed on this host. Please run `yum install xcp-ng-updater` first.',
    'An error occurred while fetching the patches. Please make sure the updater plugin is installed by running `yum install xcp-ng-updater` on the host.',
  showChangelog: 'Show changelog',
  changelog: 'Changelog',
  changelogPatch: 'Patch',
@@ -900,6 +920,10 @@ const messages = {
  changelogDescription: 'Description',
  // ----- Pool patch tabs -----
  refreshPatches: 'Refresh patches',
  install: 'Install',
  installPatchesTitle: 'Install patch{nPatches, plural, one {} other {es}}',
  installPatchesContent:
    'Are you sure you want to install {nPatches, number} patch{nPatches, plural, one {} other {es}}?',
  installPoolPatches: 'Install pool patches',
  confirmPoolPatch:
    'Are you sure you want to install all the patches on this pool?',
@@ -1360,8 +1384,7 @@ const messages = {
  resourceSetNew: 'New',

  // ---- VM import ---
  importVmsList:
    'Try dropping some VMs files here, or click to select VMs to upload. Accept only .xva/.ova files.',
  importVmsList: 'Drop OVA or XVA files here to import Virtual Machines.',
  noSelectedVms: 'No selected VMs.',
  vmImportToPool: 'To Pool:',
  vmImportToSr: 'To SR:',
@@ -1418,6 +1441,7 @@ const messages = {
  simpleBackup: 'simple',
  delta: 'delta',
  restoreBackups: 'Restore Backups',
  noBackups: 'There are no backups!',
  restoreBackupsInfo: 'Click on a VM to display restore options',
  restoreDeltaBackupsInfo:
    'Only the files of Delta Backup which are not on a SMB remote can be restored',
@@ -1458,10 +1482,16 @@ const messages = {
  restoreVmBackupsStart:
    'Start VM{nVms, plural, one {} other {s}} after restore',
  restoreVmBackupsBulkErrorTitle: 'Multi-restore error',
  restoreMetadataBackupTitle: 'Restore {item}',
  bulkRestoreMetadataBackupTitle:
    'Restore {nMetadataBackups, number} metadata backup{nMetadataBackups, plural, one {} other {s}}',
  bulkRestoreMetadataBackupMessage:
    'Restore {nMetadataBackups, number} metadata backup{nMetadataBackups, plural, one {} other {s}} from {nMetadataBackups, plural, one {its} other {their}} {oldestOrLatest} backup',
  deleteMetadataBackupTitle: 'Delete {item} backup',
  restoreVmBackupsBulkErrorMessage: 'You need to select a destination SR',
  deleteVmBackups: 'Delete backups…',
  deleteVmBackupsTitle: 'Delete {vm} backups',
  deleteVmBackupsSelect: 'Select backups to delete:',
  deleteBackupsSelect: 'Select backups to delete:',
  deleteVmBackupsSelectAll: 'All',
  deleteVmBackupsBulkTitle: 'Delete backups',
  deleteVmBackupsBulkMessage:
@@ -1469,6 +1499,11 @@ const messages = {
  deleteVmBackupsBulkConfirmText:
    'delete {nBackups} backup{nBackups, plural, one {} other {s}}',
  unknownJob: 'Unknown job',
  bulkDeleteMetadataBackupsTitle: 'Delete metadata backups',
  bulkDeleteMetadataBackupsMessage:
    'Are you sure you want to delete all the backups from {nMetadataBackups, number} metadata backup{nMetadataBackups, plural, one {} other {s}}?',
  bulkDeleteMetadataBackupsConfirmText:
    'delete {nMetadataBackups} metadata backup{nMetadataBackups, plural, one {} other {s}}',

  // ----- Restore files view -----
  listRemoteBackups: 'List remote backups',
@@ -1903,6 +1938,7 @@ const messages = {
  logsJobName: 'Job name',
  logsBackupTime: 'Backup time',
  logsRestoreTime: 'Restore time',
  copyLogToClipboard: 'Copy log to clipboard',
  logsVmNotFound: 'VM not found!',
  logsMissingVms: 'Missing VMs skipped ({ vms })',
  logsFailedRestoreError: 'Click to show error',

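These strings use ICU MessageFormat plural syntax; any ICU-compatible formatter demonstrates the behavior (intl-messageformat is used here purely for illustration):

import IntlMessageFormat from 'intl-messageformat'

const msg = new IntlMessageFormat(
  'Install patch{nPatches, plural, one {} other {es}}',
  'en'
)
msg.format({ nPatches: 1 }) // 'Install patch'
msg.format({ nPatches: 3 }) // 'Install patches'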
@@ -6,6 +6,7 @@ import { keyBy, map } from 'lodash'

import _ from '../intl'
import Component from '../base-component'
import getEventValue from '../get-event-value'
import { EMPTY_OBJECT } from '../utils'

import GenericInput from './generic-input'
@@ -33,6 +34,14 @@ export default class ObjectInput extends Component {
    })
  }

  _onUseChange = event => {
    const use = getEventValue(event)
    if (!use) {
      this.props.onChange()
    }
    this.setState({ use })
  }

  _getRequiredProps = createSelector(
    () => this.props.schema.required,
    required => (required ? keyBy(required) : EMPTY_OBJECT)
@@ -67,7 +76,7 @@ export default class ObjectInput extends Component {
          <input
            checked={use}
            disabled={disabled}
            onChange={this.linkState('use')}
            onChange={this._onUseChange}
            type='checkbox'
          />{' '}
          {_('fillOptionalInformations')}

@@ -332,8 +332,8 @@ export const createSortForType = invoke(() => {
  const iterateesByType = {
    message: message => message.time,
    PIF: pif => pif.device,
    patch: patch => patch.name,
    pool: pool => pool.name_label,
    pool_patch: patch => patch.name,
    tag: tag => tag,
    VBD: vbd => vbd.position,
    'VDI-snapshot': snapshot => snapshot.snapshot_time,
@@ -494,37 +494,18 @@ export const createGetObjectMessages = objectSelector =>
export const getObject = createGetObject((_, id) => id)

export const createDoesHostNeedRestart = hostSelector => {
  // XS < 7.1
  const patchRequiresReboot = createGetObjectsOfType('pool_patch')
    .pick(
  // Returns the first patch of the host which requires it to be
  // restarted.
      create(
        createGetObjectsOfType('host_patch')
          .pick((state, props) => {
            const host = hostSelector(state, props)
            return host && host.patches
          })
          .filter(
            create(
              (state, props) => {
                const host = hostSelector(state, props)
                return host && host.startTime
              },
              startTime => patch => patch.time > startTime
            )
          ),
        hostPatches => map(hostPatches, hostPatch => hostPatch.pool_patch)
  const patchRequiresReboot = createGetObjectsOfType('patch')
    .pick(create(hostSelector, host => host.patches))
    .find(
      create(hostSelector, host => ({ guidance, time, upgrade }) =>
        time > host.startTime &&
        (upgrade ||
          some(
            guidance,
            action => action === 'restartHost' || action === 'restartXapi'
          ))
      )
    )
    .find([
      ({ guidance, upgrade }) =>
        upgrade ||
        find(
          guidance,
          action => action === 'restartHost' || action === 'restartXapi'
        ),
    ])

  return create(
    hostSelector,

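Stripped of the selector plumbing, the new logic reduces to a simple predicate over the host's patches; roughly (a plain-data sketch, field names taken from the diff):

// A host needs a restart if any patch applied after its last boot
// either is an upgrade or carries restart guidance.
const doesHostNeedRestart = (host, patches) =>
  patches.some(
    ({ guidance, time, upgrade }) =>
      time > host.startTime &&
      (upgrade ||
        guidance.some(a => a === 'restartHost' || a === 'restartXapi'))
  )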
@@ -1,5 +1,5 @@
import * as CM from 'complex-matcher'
import { get, identity, isEmpty } from 'lodash'
import { escapeRegExp, get, identity, isEmpty } from 'lodash'

import { EMPTY_OBJECT } from './../utils'

@@ -59,7 +59,7 @@ export const constructSmartPattern = (
const valueToComplexMatcher = pattern => {
  if (typeof pattern === 'string') {
    return new CM.String(pattern)
    return new CM.RegExpNode(`^${escapeRegExp(pattern)}$`)
  }

  if (Array.isArray(pattern)) {

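Escaping the value and anchoring it keeps exact-match semantics even when the pattern contains regexp metacharacters; for instance:

import { escapeRegExp } from 'lodash'

const exact = value => new RegExp(`^${escapeRegExp(value)}$`)
exact('prod (eu-1)').test('prod (eu-1)') // true: parentheses are escaped
exact('prod (eu-1)').test('prod (eu-1) bis') // false: anchoring forbids substring matches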
Some files were not shown because too many files have changed in this diff.