Compare commits: `xo-web-v5.`...`xen-api-v0` (275 commits)
| SHA1 |
|---|
| bdad6c0f6d |
| ff1ca5d933 |
| 2cf4c494a4 |
| 95ac0a861a |
| 746c301f39 |
| 6455b12b58 |
| 485b8fe993 |
| d7527f280c |
| d57fa4375d |
| d9e42c6625 |
| 28293d3fce |
| d505401446 |
| fafc24aeae |
| f78ef0d208 |
| 8384cc3652 |
| 60aa18a229 |
| 3d64b42a89 |
| b301997d4b |
| ab34743250 |
| bc14a1d167 |
| 2886ec116f |
| c2beb2a5fa |
| d6ac10f527 |
| 9dcd8a707a |
| e1e97ef158 |
| 5d6b37f81a |
| e1da08ba38 |
| 1dfb50fefd |
| 5c06ebc9c8 |
| 52a9270fb0 |
| 82247d7422 |
| b34688043f |
| ce4bcbd19d |
| cde9a02c32 |
| fe1da4ea12 |
| a73306817b |
| 54e683d3d4 |
| f49910ca82 |
| 4052f7f736 |
| b47e097983 |
| e44dbfb2a4 |
| 7d69dd9400 |
| e6aae8fcfa |
| da800b3391 |
| 3a574bcecc |
| 1bb0e234e7 |
| b7e14ebf2a |
| 2af1207702 |
| ecfed30e6e |
| d06c3e3dd8 |
| 16b3fbeb16 |
| 0938804947 |
| 851bcf9816 |
| 9f6fc785bc |
| 56636bf5d4 |
| 3899a65167 |
| 628e53c1c3 |
| 9fa424dd8d |
| 3e6f2eecfa |
| cc655c8ba8 |
| 78aa0474ee |
| 9caefa2f49 |
| 478726fa3b |
| f64917ec52 |
| 2bc25f91c4 |
| 623d7ffe2f |
| 07510b5099 |
| 9f21f9a7bc |
| 93da70709e |
| 00436e744a |
| 1e642fc512 |
| 6baef2450c |
| 600f34f85a |
| 6c0c6bc5c4 |
| fcd62ed3cd |
| 785f2e3a6d |
| c2925f7c1e |
| 60814d8b58 |
| 2dec448f2c |
| b71f4f6800 |
| 558083a916 |
| d507ed9dff |
| 7ed0242662 |
| d7b3d989d7 |
| 707b2f77f0 |
| 5ddbb76979 |
| 97b0fe62d4 |
| 8ac9b2cdc7 |
| bc4c1a13e6 |
| d3ec303ade |
| 6cfc2a1ba6 |
| e15cadc863 |
| 2f9284c263 |
| 2465852fd6 |
| a9f48a0d50 |
| 4ed0035c67 |
| b66f2dfb80 |
| 3cb155b129 |
| df7efc04e2 |
| a21a8457a4 |
| 020955f535 |
| 51f23a5f03 |
| d024319441 |
| f8f35938c0 |
| 2573ace368 |
| 6bf7269814 |
| 6695c7bf5e |
| 44a83fd817 |
| 08ddfe0649 |
| 5ba170bf1f |
| 8150d3110c |
| 312b33ae85 |
| 008eb995ed |
| 6d8848043c |
| cf572c0cc5 |
| 18cfa7dd29 |
| 72cac2bbd6 |
| 48ffa28e0b |
| 2e6baeb95a |
| 3b5650dc1e |
| 3279728e4b |
| fe0dcbacc5 |
| 7c5d90fe40 |
| 944dad6e36 |
| 6713d3ec66 |
| 6adadb2359 |
| b01096876c |
| 60243d8517 |
| 94d0809380 |
| e935dd9bad |
| 30aa2b83d0 |
| fc42c58079 |
| ee9443cf16 |
| f91d4a07eb |
| c5a5ef6c93 |
| 7559fbdab7 |
| 7925ee8fee |
| fea5117ed8 |
| 468a2c5bf3 |
| c728eeaffa |
| 6aa8e0d4ce |
| 76ae54ff05 |
| 344e9e06d0 |
| d866bccf3b |
| 3931c4cf4c |
| 420f1c77a1 |
| 59106aa29e |
| 4216a5808a |
| 12a7000e36 |
| 685355c6fb |
| 66f685165e |
| 8e8b1c009a |
| 705d069246 |
| 58e8d75935 |
| 5eb1454e67 |
| 04b31db41b |
| 29b4cf414a |
| 7a2a88b7ad |
| dc34f3478d |
| 58175a4f5e |
| c4587c11bd |
| 5b1a5f4fe7 |
| ee2db918f3 |
| 0695bafb90 |
| 8e116063bf |
| 3f3b372f89 |
| 24cc1e8e29 |
| e988ad4df9 |
| 5c12d4a546 |
| d90b85204d |
| 6332355031 |
| 4ce702dfdf |
| 362a381dfb |
| 0eec4ee2f7 |
| b92390087b |
| bce4d5d96f |
| 27262ff3e8 |
| 444b6642f1 |
| 67d11020bb |
| 7603974370 |
| 6cb5639243 |
| 0c5a37d8a3 |
| 78cc7fe664 |
| 2d51bef390 |
| bc68fff079 |
| 0a63acac73 |
| e484b073e1 |
| b2813d7cc0 |
| 29b941868d |
| 37af47ecff |
| 8eb28d40da |
| 383dd7b38e |
| b13b3fe9f6 |
| 04a5f55b16 |
| 4ab1de918e |
| 44fc5699fd |
| dd6c3ff434 |
| d747b937ee |
| 9aa63d0354 |
| 36220ac1c5 |
| d8eb5d4934 |
| b580ea98a7 |
| 0ad68c2280 |
| b16f1899ac |
| 7e740a429a |
| 61b1bd2533 |
| d6ddba8e56 |
| d10c7f3898 |
| 2b2c2c42f1 |
| efc65a0669 |
| d8e0727d4d |
| a46a95b6fa |
| ab4c3bc416 |
| 8a2f012b79 |
| 5fd9eea3f6 |
| 1b12aa90de |
| dfb6d1b58e |
| 53add3bf2d |
| 63414d5db9 |
| 1312df8c88 |
| 94d36c3458 |
| 0c3623e0f8 |
| ad01fcc880 |
| b7f20a963f |
| c51aad61eb |
| 12bbdba82c |
| eb3760ee4a |
| af00adcfcc |
| 93985e1a51 |
| 36f7af8576 |
| 0608cda6d7 |
| 9565823900 |
| 48b833c3b3 |
| 9990439594 |
| e9fb37325d |
| 810c976d37 |
| c1cbc3b5aa |
| 8298db1f2e |
| 47844fcf69 |
| f26f8b2af9 |
| b246e84c48 |
| 6545e47193 |
| 0a78c2bb94 |
| 36102e0dff |
| bce0bf05e5 |
| 55b762f490 |
| ad58f6a147 |
| d67038c78d |
| 4badf48c45 |
| 449dd2998b |
| c613b4cab3 |
| 370a0e8851 |
| eb4f9f0b18 |
| bbf5e82c5d |
| 27835bfbd0 |
| f663dbe7a7 |
| 02e7eeec51 |
| 29a7bd0cb2 |
| 0fd22b9fd8 |
| df809baaaf |
| cfd956631b |
| 23687f62f0 |
| 5aabea1121 |
| eac07a96de |
| e9d1876699 |
| f25705d559 |
| ea48136797 |
| 270185d9dc |
| 308d53dc6b |
| a97c5f4cd9 |
| 3d7e0df4dd |
| 53a0b7eed0 |
| 1ed2a6b620 |
| 76f9017482 |
| 86425f5d51 |
@@ -3,63 +3,12 @@
# Julien Fontanet's configuration
# https://gist.github.com/julien-f/8096213

# Top-most EditorConfig file.
root = true

# Common config.
[*]
charset = utf-8
end_of_line = lf
indent_size = 2
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true

# CoffeeScript
#
# https://github.com/polarmobile/coffeescript-style-guide/blob/master/README.md
[*.{,lit}coffee]
indent_size = 2
indent_style = space

# Markdown
[*.{md,mdwn,mdown,markdown}]
indent_size = 4
indent_style = space

# Package.json
#
# This indentation style is the one used by npm.
[package.json]
indent_size = 2
indent_style = space

# Pug (Jade)
[*.{jade,pug}]
indent_size = 2
indent_style = space

# JavaScript
#
# Two spaces seems to be the standard most common style, at least in
# Node.js (http://nodeguide.com/style.html#tabs-vs-spaces).
[*.{js,jsx,ts,tsx}]
indent_size = 2
indent_style = space

# Less
[*.less]
indent_size = 2
indent_style = space

# Sass
#
# Style used for http://libsass.com
[*.s[ac]ss]
indent_size = 2
indent_style = space

# YAML
#
# Only spaces are allowed.
[*.yaml]
indent_size = 2
indent_style = space
.eslintrc.js (22 changed lines)

@@ -1,5 +1,11 @@
module.exports = {
extends: ['standard', 'standard-jsx', 'prettier'],
extends: [
'standard',
'standard-jsx',
'prettier',
'prettier/standard',
'prettier/react',
],
globals: {
__DEV__: true,
$Dict: true,
@@ -10,6 +16,16 @@ module.exports = {
$PropertyType: true,
$Shape: true,
},

overrides: [
{
files: ['packages/*cli*/**/*.js', '*-cli.js'],
rules: {
'no-console': 'off',
},
},
],

parser: 'babel-eslint',
parserOptions: {
ecmaFeatures: {
@@ -17,12 +33,10 @@ module.exports = {
},
},
rules: {
'no-console': ['error', { allow: ['warn', 'error'] }],
'no-var': 'error',
'node/no-extraneous-import': 'error',
'node/no-extraneous-require': 'error',
'prefer-const': 'error',

// See https://github.com/prettier/eslint-config-prettier/issues/65
'react/jsx-indent': 'off',
},
}
@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/async-map",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/async-map",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -46,6 +46,12 @@ const getConfig = (key, ...args) => {
: config
}

// some plugins must be used in a specific order
const pluginsOrder = [
'@babel/plugin-proposal-decorators',
'@babel/plugin-proposal-class-properties',
]

module.exports = function(pkg, plugins, presets) {
plugins === undefined && (plugins = {})
presets === undefined && (presets = {})
@@ -61,7 +67,13 @@ module.exports = function(pkg, plugins, presets) {
return {
comments: !__PROD__,
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {
const oA = pluginsOrder.indexOf(a)
const oB = pluginsOrder.indexOf(b)
return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
}),
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
}
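The hunk above sorts the generated Babel plugin list so that the entries named in `pluginsOrder` keep that relative order. A minimal, self-contained sketch of that comparator (the plugin options in the usage example are made up for illustration):

```js
// Standalone sketch of the ordering logic introduced above: plugins listed in
// `pluginsOrder` keep that relative order, everything else falls back to a
// simple name comparison.
const pluginsOrder = [
  '@babel/plugin-proposal-decorators',
  '@babel/plugin-proposal-class-properties',
]

const sortPlugins = plugins =>
  Object.keys(plugins)
    .map(plugin => [plugin, plugins[plugin]])
    .sort(([a], [b]) => {
      const oA = pluginsOrder.indexOf(a)
      const oB = pluginsOrder.indexOf(b)
      return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
    })

// The decorators plugin ends up first even though the object lists it second.
console.log(
  sortPlugins({
    '@babel/plugin-proposal-class-properties': { loose: true },
    '@babel/plugin-proposal-decorators': { legacy: true },
  }).map(([name]) => name)
)
```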
@@ -5,6 +5,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/babel-config",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
}
@@ -82,35 +82,26 @@ ${cliName} v${pkg.version}
)

await Promise.all([
srcXapi.setFieldEntries(srcSnapshot, 'other_config', metadata),
srcXapi.setFieldEntries(srcSnapshot, 'other_config', {
'xo:backup:exported': 'true',
}),
tgtXapi.setField(
tgtVm,
'name_label',
`${srcVm.name_label} (${srcSnapshot.snapshot_time})`
),
tgtXapi.setFieldEntries(tgtVm, 'other_config', metadata),
tgtXapi.setFieldEntries(tgtVm, 'other_config', {
srcSnapshot.update_other_config(metadata),
srcSnapshot.update_other_config('xo:backup:exported', 'true'),
tgtVm.set_name_label(`${srcVm.name_label} (${srcSnapshot.snapshot_time})`),
tgtVm.update_other_config(metadata),
tgtVm.update_other_config({
'xo:backup:sr': tgtSr.uuid,
'xo:copy_of': srcSnapshotUuid,
}),
tgtXapi.setFieldEntries(tgtVm, 'blocked_operations', {
start:
'Start operation for this vm is blocked, clone it if you want to use it.',
}),
tgtVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
Promise.all(
userDevices.map(userDevice => {
const srcDisk = srcDisks[userDevice]
const tgtDisk = tgtDisks[userDevice]

return tgtXapi.setFieldEntry(
tgtDisk,
'other_config',
'xo:copy_of',
srcDisk.uuid
)
return tgtDisk.update_other_config({
'xo:copy_of': srcDisk.uuid,
})
})
),
])
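The hunk above replaces `xapi.setFieldEntries(record, 'other_config', ...)` calls with `update_other_config()` called directly on the records, matching the `xen-api` version bump in the package.json below. A rough sketch of that calling convention against a mock record; the merge semantics assumed here are an illustration, not the library's documented behaviour:

```js
// Mock record imitating the two call shapes used above: an object argument
// merges several entries, a key/value pair sets a single entry. The real
// records come from the xen-api library and talk to a live XAPI.
const makeMockRecord = () => ({
  other_config: {},
  async update_other_config(keyOrMap, value) {
    if (typeof keyOrMap === 'string') {
      this.other_config[keyOrMap] = value
    } else {
      Object.assign(this.other_config, keyOrMap) // assumed merge semantics
    }
  },
})

async function main() {
  const tgtVm = makeMockRecord()
  await tgtVm.update_other_config({ 'xo:backup:sr': 'sr-uuid', 'xo:copy_of': 'snapshot-uuid' })
  await tgtVm.update_other_config('xo:backup:exported', 'true')
  console.log(tgtVm.other_config)
}
main()
```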
@@ -4,6 +4,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cr-seed-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/cr-seed-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -15,6 +16,6 @@
},
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.24.0"
"xen-api": "^0.25.0"
}
}
@@ -17,6 +17,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/cron",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/defined",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/defined",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/emit-async",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -1,12 +1,13 @@
{
"name": "@xen-orchestra/fs",
"version": "0.6.0",
"version": "0.8.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/fs",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -23,11 +24,12 @@
"@marsaud/smb2": "^0.13.0",
"@sindresorhus/df": "^2.1.0",
"@xen-orchestra/async-map": "^0.0.0",
"decorator-synchronized": "^0.5.0",
"execa": "^1.0.0",
"fs-extra": "^7.0.0",
"get-stream": "^4.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"readable-stream": "^3.0.6",
"through2": "^3.0.0",
"tmp": "^0.0.33",
@@ -43,7 +45,7 @@
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"dotenv": "^6.1.0",
"dotenv": "^7.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
@@ -1,5 +1,6 @@
import execa from 'execa'
import fs from 'fs-extra'
import { ignoreErrors } from 'promise-toolbox'
import { join } from 'path'
import { tmpdir } from 'os'

@@ -21,7 +22,13 @@ export default class MountHandler extends LocalHandler {
super(remote, opts)

this._execa = useSudo ? sudoExeca : execa
this._params = params
this._keeper = undefined
this._params = {
...params,
options: [params.options, remote.options]
.filter(_ => _ !== undefined)
.join(','),
}
this._realPath = join(
mountsDir,
remote.id ||
@@ -32,19 +39,20 @@ export default class MountHandler extends LocalHandler {
}

async _forget() {
await this._execa('umount', ['--force', this._getRealPath()], {
env: {
LANG: 'C',
},
}).catch(error => {
if (
error == null ||
typeof error.stderr !== 'string' ||
!error.stderr.includes('not mounted')
) {
throw error
}
})
const keeper = this._keeper
if (keeper === undefined) {
return
}
this._keeper = undefined
await fs.close(keeper)

await ignoreErrors.call(
this._execa('umount', [this._getRealPath()], {
env: {
LANG: 'C',
},
})
)
}

_getRealPath() {
@@ -52,26 +60,49 @@ export default class MountHandler extends LocalHandler {
}

async _sync() {
await fs.ensureDir(this._getRealPath())
const { type, device, options, env } = this._params
return this._execa(
'mount',
['-t', type, device, this._getRealPath(), '-o', options],
{
env: {
LANG: 'C',
...env,
},
// in case of multiple `sync`s, ensure we properly close previous keeper
{
const keeper = this._keeper
if (keeper !== undefined) {
this._keeper = undefined
ignoreErrors.call(fs.close(keeper))
}
).catch(error => {
let stderr
if (
error == null ||
typeof (stderr = error.stderr) !== 'string' ||
!(stderr.includes('already mounted') || stderr.includes('busy'))
) {
}

const realPath = this._getRealPath()

await fs.ensureDir(realPath)

try {
const { type, device, options, env } = this._params
await this._execa(
'mount',
['-t', type, device, realPath, '-o', options],
{
env: {
LANG: 'C',
...env,
},
}
)
} catch (error) {
try {
// the failure may mean it's already mounted, use `findmnt` to check
// that's the case
await this._execa('findmnt', [realPath], {
stdio: 'ignore',
})
} catch (_) {
throw error
}
})
}

// keep an open file on the mount to prevent it from being unmounted if used
// by another handler/process
const keeperPath = `${realPath}/.keeper_${Math.random()
.toString(36)
.slice(2)}`
this._keeper = await fs.open(keeperPath, 'w')
ignoreErrors.call(fs.unlink(keeperPath))
}
}
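The rewritten `_sync()` above ends by opening a throw-away "keeper" file on the mount and immediately unlinking it, so the mount point stays busy (and cannot be unmounted by another process) until `_forget()` closes the descriptor. A small standalone sketch of that trick using only Node's `fs.promises`; the real handler uses `fs-extra` and numeric descriptors, and the directory below is just an example:

```js
// Hold an open descriptor inside a directory, then unlink the file: the name
// disappears but the descriptor keeps the filesystem busy until it is closed.
const fs = require('fs/promises')

async function openKeeper(dir) {
  const keeperPath = `${dir}/.keeper_${Math.random().toString(36).slice(2)}`
  const handle = await fs.open(keeperPath, 'w') // keeps the FS busy while open
  await fs.unlink(keeperPath).catch(() => {}) // no litter left behind
  return handle
}

async function closeKeeper(handle) {
  await handle.close() // mirror of _forget(): release before unmounting
}

openKeeper('/tmp').then(closeKeeper)
```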
@@ -5,6 +5,7 @@ import getStream from 'get-stream'

import asyncMap from '@xen-orchestra/async-map'
import path from 'path'
import synchronized from 'decorator-synchronized'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
import { randomBytes } from 'crypto'
@@ -24,6 +25,10 @@ type RemoteInfo = { used?: number, size?: number }
type File = FileDescriptor | string

const checksumFile = file => file + '.checksum'
const computeRate = (hrtime: number[], size: number) => {
const seconds = hrtime[0] + hrtime[1] / 1e9
return size / seconds
}

const DEFAULT_TIMEOUT = 6e5 // 10 min

@@ -34,18 +39,18 @@ const ignoreEnoent = error => {
}

class PrefixWrapper {
constructor(remote, prefix) {
constructor(handler, prefix) {
this._prefix = prefix
this._remote = remote
this._handler = handler
}

get type() {
return this._remote.type
return this._handler.type
}

// necessary to remove the prefix from the path with `prependDir` option
async list(dir, opts) {
const entries = await this._remote.list(this._resolve(dir), opts)
const entries = await this._handler.list(this._resolve(dir), opts)
if (opts != null && opts.prependDir) {
const n = this._prefix.length
entries.forEach((entry, i, entries) => {
@@ -56,7 +61,7 @@ class PrefixWrapper {
}

rename(oldPath, newPath) {
return this._remote.rename(this._resolve(oldPath), this._resolve(newPath))
return this._handler.rename(this._resolve(oldPath), this._resolve(newPath))
}

_resolve(path) {
@@ -216,6 +221,7 @@ export default class RemoteHandlerAbstract {
// FIXME: Some handlers are implemented based on system-wide mecanisms (such
// as mount), forgetting them might breaking other processes using the same
// remote.
@synchronized()
async forget(): Promise<void> {
await this._forget()
}
@@ -354,23 +360,33 @@ export default class RemoteHandlerAbstract {
// metadata
//
// This method MUST ALWAYS be called before using the handler.
@synchronized()
async sync(): Promise<void> {
await this._sync()
}

async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
const data = await fromCallback(cb => randomBytes(SIZE, cb))
let step = 'write'
try {
const writeStart = process.hrtime()
await this._outputFile(testFileName, data, { flags: 'wx' })
const writeDuration = process.hrtime(writeStart)

step = 'read'
const readStart = process.hrtime()
const read = await this._readFile(testFileName, { flags: 'r' })
const readDuration = process.hrtime(readStart)

if (!data.equals(read)) {
throw new Error('output and input did not match')
}
return {
success: true,
writeRate: computeRate(writeDuration, SIZE),
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
return {
@@ -565,7 +581,7 @@ function createPrefixWrapperMethods() {
if (arguments.length !== 0 && typeof (path = arguments[0]) === 'string') {
arguments[0] = this._resolve(path)
}
return value.apply(this._remote, arguments)
return value.apply(this._handler, arguments)
}

defineProperty(pPw, name, descriptor)
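For context on the `_remote` to `_handler` rename above: `PrefixWrapper` is a thin delegator that prepends a fixed prefix to every path before forwarding the call. A reduced sketch of that shape, wrapping a stub instead of a real remote handler:

```js
// Reduced sketch of the delegation pattern: resolve the path, forward the call.
class PrefixWrapper {
  constructor(handler, prefix) {
    this._handler = handler
    this._prefix = prefix
  }

  _resolve(path) {
    return this._prefix + path
  }

  list(dir, opts) {
    return this._handler.list(this._resolve(dir), opts)
  }
}

// Stub handler standing in for a real RemoteHandler.
const stub = { list: async dir => [`listed ${dir}`] }
new PrefixWrapper(stub, '/backups').list('/vm1').then(console.log) // [ 'listed /backups/vm1' ]
```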
@@ -16,6 +16,8 @@ class TestHandler extends AbstractHandler {
}
}

jest.useFakeTimers()

describe('closeFile()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({

@@ -290,9 +290,11 @@ handlers.forEach(url => {

describe('#test()', () => {
it('tests the remote appears to be working', async () => {
expect(await handler.test()).toEqual({
success: true,
})
const answer = await handler.test()

expect(answer.success).toBe(true)
expect(typeof answer.writeRate).toBe('number')
expect(typeof answer.readRate).toBe('number')
})
})
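The updated test above expects `handler.test()` to report `writeRate` and `readRate` in addition to `success`. Those rates come from the `computeRate()` helper added earlier in this diff: a `process.hrtime()` delta is `[seconds, nanoseconds]`, so the rate is `size / (s + ns / 1e9)` bytes per second. A tiny runnable sketch, where the "work" being timed is just a buffer copy standing in for the remote I/O:

```js
const computeRate = (hrtime, size) => {
  const seconds = hrtime[0] + hrtime[1] / 1e9
  return size / seconds
}

const SIZE = 10 * 1024 * 1024 // 10 MiB, as in the handler's test()
const start = process.hrtime()
Buffer.alloc(SIZE).copy(Buffer.alloc(SIZE)) // stand-in for the write/read round trip
const rate = computeRate(process.hrtime(start), SIZE)
console.log(`~${(rate / 1e6).toFixed(1)} MB/s`)
```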
@@ -6,12 +6,11 @@ const DEFAULT_NFS_OPTIONS = 'vers=3'

export default class NfsHandler extends MountHandler {
constructor(remote, opts) {
const { host, port, path, options } = parse(remote.url)
const { host, port, path } = parse(remote.url)
super(remote, opts, {
type: 'nfs',
device: `${host}${port !== undefined ? ':' + port : ''}:${path}`,
options:
DEFAULT_NFS_OPTIONS + (options !== undefined ? `,${options}` : ''),
options: DEFAULT_NFS_OPTIONS,
})
}

@@ -5,19 +5,13 @@ import normalizePath from './_normalizePath'

export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const {
domain = 'WORKGROUP',
host,
options,
password,
path,
username,
} = parse(remote.url)
const { domain = 'WORKGROUP', host, password, path, username } = parse(
remote.url
)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalizePath(path),
options:
`domain=${domain}` + (options !== undefined ? `,${options}` : ''),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,

@@ -75,7 +75,7 @@ catchGlobalErrors(transport)
```js
import transportConsole from '@xen-orchestra/log/transports/console'

configure(transports.console())
configure(transportConsole())
```

#### Email

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/log",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/log",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -30,7 +31,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.11.0"
"promise-toolbox": "^0.12.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -55,7 +55,8 @@ export const required = name => {
// -------------------------------------------------------------------

export const serializeError = error => ({
...error,
...error, // Copy enumerable properties.
code: error.code,
message: error.message,
name: error.name,
stack: error.stack,
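On the `serializeError` change above: the explicit `code`/`message`/`name`/`stack` copies are needed because those standard `Error` fields are non-enumerable (or inherited, in the case of `name`), so the spread alone would drop them. A quick runnable check:

```js
const serializeError = error => ({
  ...error, // copy enumerable properties (extra fields set by callers)
  code: error.code,
  message: error.message,
  name: error.name,
  stack: error.stack,
})

const err = Object.assign(new Error('boom'), { code: 'EFAIL' })
console.log(Object.keys({ ...err })) // [ 'code' ]: message/name/stack are lost by the spread
console.log(serializeError(err).message) // 'boom'
```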
@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/mixin",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/mixin",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
CHANGELOG.md (160 changed lines)

@@ -1,18 +1,153 @@
# ChangeLog

## *next*
## **5.33.0** (2019-03-29)

### Enhancements

- [SR/Disk] Disable actions on unmanaged VDIs [#3988](https://github.com/vatesfr/xen-orchestra/issues/3988) (PR [#4000](https://github.com/vatesfr/xen-orchestra/pull/4000))
- [Pool] Specify automatic networks on a Pool [#3916](https://github.com/vatesfr/xen-orchestra/issues/3916) (PR [#3958](https://github.com/vatesfr/xen-orchestra/pull/3958))
- [VM/advanced] Manage start delay for VM [#3909](https://github.com/vatesfr/xen-orchestra/issues/3909) (PR [#4002](https://github.com/vatesfr/xen-orchestra/pull/4002))
- [New/Vm] SR section: Display warning message when the selected SRs aren't in the same host [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3967](https://github.com/vatesfr/xen-orchestra/pull/3967))
- Enable compression for HTTP requests (and initial objects fetch)
- [VDI migration] Display same-pool SRs first in the selector [#3945](https://github.com/vatesfr/xen-orchestra/issues/3945) (PR [#3996](https://github.com/vatesfr/xen-orchestra/pull/3996))
- [Home] Save the current page in url [#3993](https://github.com/vatesfr/xen-orchestra/issues/3993) (PR [#3999](https://github.com/vatesfr/xen-orchestra/pull/3999))
- [VDI] Ensure suspend VDI is destroyed when destroying a VM [#4027](https://github.com/vatesfr/xen-orchestra/issues/4027) (PR [#4038](https://github.com/vatesfr/xen-orchestra/pull/4038))
- [VM/disk]: Warning when 2 VDIs are on 2 different hosts' local SRs [#3911](https://github.com/vatesfr/xen-orchestra/issues/3911) (PR [#3969](https://github.com/vatesfr/xen-orchestra/pull/3969))
- [Remotes] Benchmarks (read and write rate speed) added when remote is tested [#3991](https://github.com/vatesfr/xen-orchestra/issues/3991) (PR [#4015](https://github.com/vatesfr/xen-orchestra/pull/4015))
- [Cloud Config] Support both NoCloud and Config Drive 2 datasources for maximum compatibility (PR [#4053](https://github.com/vatesfr/xen-orchestra/pull/4053))
- [Advanced] Configurable cookie validity (PR [#4059](https://github.com/vatesfr/xen-orchestra/pull/4059))
- [Plugins] Display number of installed plugins [#4008](https://github.com/vatesfr/xen-orchestra/issues/4008) (PR [#4050](https://github.com/vatesfr/xen-orchestra/pull/4050))
- [Continuous Replication] Opt-in mode to guess VHD size, should help with XenServer 7.1 CU2 and various `VDI_IO_ERROR` errors (PR [#3726](https://github.com/vatesfr/xen-orchestra/pull/3726))
- [VM/Snapshots] Always delete broken quiesced snapshots [#4074](https://github.com/vatesfr/xen-orchestra/issues/4074) (PR [#4075](https://github.com/vatesfr/xen-orchestra/pull/4075))
- [Settings/Servers] Display link to pool [#4041](https://github.com/vatesfr/xen-orchestra/issues/4041) (PR [#4045](https://github.com/vatesfr/xen-orchestra/pull/4045))
- [Import] Change wording of drop zone (PR [#4020](https://github.com/vatesfr/xen-orchestra/pull/4020))
- [Backup NG] Ability to set the interval of the full backups [#1783](https://github.com/vatesfr/xen-orchestra/issues/1783) (PR [#4083](https://github.com/vatesfr/xen-orchestra/pull/4083))
- [Hosts] Display a warning icon if you have XenServer license restrictions [#4091](https://github.com/vatesfr/xen-orchestra/issues/4091) (PR [#4094](https://github.com/vatesfr/xen-orchestra/pull/4094))
- [Restore] Ability to restore a metadata backup [#4004](https://github.com/vatesfr/xen-orchestra/issues/4004) (PR [#4023](https://github.com/vatesfr/xen-orchestra/pull/4023))
- Improve connection to XCP-ng/XenServer hosts:
  - never disconnect by itself even in case of errors
  - never stop watching events

### Bug fixes

- [New network] PIF was wrongly required which prevented from creating a private network (PR [#4010](https://github.com/vatesfr/xen-orchestra/pull/4010))
- [Google authentication] Migrate to new endpoint
- [Backup NG] Better handling of huge logs [#4025](https://github.com/vatesfr/xen-orchestra/issues/4025) (PR [#4026](https://github.com/vatesfr/xen-orchestra/pull/4026))
- [Home/VM] Bulk migration: fixed VM VDIs not migrated to the selected SR [#3986](https://github.com/vatesfr/xen-orchestra/issues/3986) (PR [#3987](https://github.com/vatesfr/xen-orchestra/pull/3987))
- [Stats] Fix cache usage with simultaneous requests [#4017](https://github.com/vatesfr/xen-orchestra/issues/4017) (PR [#4028](https://github.com/vatesfr/xen-orchestra/pull/4028))
- [Backup NG] Fix compression displayed for the wrong backup mode (PR [#4021](https://github.com/vatesfr/xen-orchestra/pull/4021))
- [Home] Always sort the items by their names as a secondary sort criteria [#3983](https://github.com/vatesfr/xen-orchestra/issues/3983) (PR [#4047](https://github.com/vatesfr/xen-orchestra/pull/4047))
- [Remotes] Fixes `spawn mount EMFILE` error during backup
- Properly redirect to sign in page instead of being stuck in a refresh loop
- [Backup-ng] No more false positives when list matching VMs on Home page [#4078](https://github.com/vatesfr/xen-orchestra/issues/4078) (PR [#4085](https://github.com/vatesfr/xen-orchestra/pull/4085))
- [Plugins] Properly remove optional settings when unchecking _Fill information_ (PR [#4076](https://github.com/vatesfr/xen-orchestra/pull/4076))
- [Patches] (PR [#4077](https://github.com/vatesfr/xen-orchestra/pull/4077))
  - Add a host to a pool: fixes the auto-patching of the host on XenServer < 7.2 [#3783](https://github.com/vatesfr/xen-orchestra/issues/3783)
  - Add a host to a pool: homogenizes both the host and **pool**'s patches [#2188](https://github.com/vatesfr/xen-orchestra/issues/2188)
  - Safely install a subset of patches on a pool [#3777](https://github.com/vatesfr/xen-orchestra/issues/3777)
  - XCP-ng: no longer requires to run `yum install xcp-ng-updater` when it's already installed [#3934](https://github.com/vatesfr/xen-orchestra/issues/3934)

## **5.32.2** (2019-02-28)

### Bug fixes

- Fix XAPI events monitoring on old version (XenServer 7.2)

## **5.32.1** (2019-02-28)

### Bug fixes

- Fix a very short timeout in the monitoring of XAPI events which may lead to unresponsive XenServer hosts

## **5.32.0** (2019-02-28)

### Enhancements

- [VM migration] Display same-pool hosts first in the selector [#3262](https://github.com/vatesfr/xen-orchestra/issues/3262) (PR [#3890](https://github.com/vatesfr/xen-orchestra/pull/3890))
- [Home/VM] Sort VM by start time [#3955](https://github.com/vatesfr/xen-orchestra/issues/3955) (PR [#3970](https://github.com/vatesfr/xen-orchestra/pull/3970))
- [Editable fields] Unfocusing (clicking outside) submits the change instead of canceling (PR [#3980](https://github.com/vatesfr/xen-orchestra/pull/3980))
- [Network] Dedicated page for network creation [#3895](https://github.com/vatesfr/xen-orchestra/issues/3895) (PR [#3906](https://github.com/vatesfr/xen-orchestra/pull/3906))
- [Logs] Add button to download the log [#3957](https://github.com/vatesfr/xen-orchestra/issues/3957) (PR [#3985](https://github.com/vatesfr/xen-orchestra/pull/3985))
- [Continuous Replication] Share full copy between schedules [#3973](https://github.com/vatesfr/xen-orchestra/issues/3973) (PR [#3995](https://github.com/vatesfr/xen-orchestra/pull/3995))
- [Backup] Ability to backup XO configuration and pool metadata [#808](https://github.com/vatesfr/xen-orchestra/issues/808) [#3501](https://github.com/vatesfr/xen-orchestra/issues/3501) (PR [#3912](https://github.com/vatesfr/xen-orchestra/pull/3912))

### Bug fixes

- [Host] Fix multipathing status for XenServer < 7.5 [#3956](https://github.com/vatesfr/xen-orchestra/issues/3956) (PR [#3961](https://github.com/vatesfr/xen-orchestra/pull/3961))
- [Home/VM] Show creation date of the VM on if it available [#3953](https://github.com/vatesfr/xen-orchestra/issues/3953) (PR [#3959](https://github.com/vatesfr/xen-orchestra/pull/3959))
- [Notifications] Fix invalid notifications when not registered (PR [#3966](https://github.com/vatesfr/xen-orchestra/pull/3966))
- [Import] Fix import of some OVA files [#3962](https://github.com/vatesfr/xen-orchestra/issues/3962) (PR [#3974](https://github.com/vatesfr/xen-orchestra/pull/3974))
- [Servers] Fix *already connected error* after a server has been removed during connection [#3976](https://github.com/vatesfr/xen-orchestra/issues/3976) (PR [#3977](https://github.com/vatesfr/xen-orchestra/pull/3977))
- [Backup] Fix random _mount_ issues with NFS/SMB remotes [#3973](https://github.com/vatesfr/xen-orchestra/issues/3973) (PR [#4003](https://github.com/vatesfr/xen-orchestra/pull/4003))

### Released packages

- @xen-orchestra/fs v0.7.0
- xen-api v0.24.3
- xoa-updater v0.15.2
- xo-server v5.36.0
- xo-web v5.36.0

## **5.31.2** (2019-02-08)

### Enhancements

- [Home] Set description on bulk snapshot [#3925](https://github.com/vatesfr/xen-orchestra/issues/3925) (PR [#3933](https://github.com/vatesfr/xen-orchestra/pull/3933))
- Work-around the XenServer issue when `VBD#VDI` is an empty string instead of an opaque reference (PR [#3950](https://github.com/vatesfr/xen-orchestra/pull/3950))
- [VDI migration] Retry when XenServer fails with `TOO_MANY_STORAGE_MIGRATES` (PR [#3940](https://github.com/vatesfr/xen-orchestra/pull/3940))
- [VM]
  - [General] The creation date of the VM is now visible [#3932](https://github.com/vatesfr/xen-orchestra/issues/3932) (PR [#3947](https://github.com/vatesfr/xen-orchestra/pull/3947))
  - [Disks] Display device name [#3902](https://github.com/vatesfr/xen-orchestra/issues/3902) (PR [#3946](https://github.com/vatesfr/xen-orchestra/pull/3946))
- [VM Snapshotting]
  - Detect and destroy broken quiesced snapshot left by XenServer [#3936](https://github.com/vatesfr/xen-orchestra/issues/3936) (PR [#3937](https://github.com/vatesfr/xen-orchestra/pull/3937))
  - Retry twice after a 1 minute delay if quiesce failed [#3938](https://github.com/vatesfr/xen-orchestra/issues/3938) (PR [#3952](https://github.com/vatesfr/xen-orchestra/pull/3952))

### Bug fixes

- [Import] Fix import of big OVA files
- [Host] Show the host's memory usage instead of the sum of the VMs' memory usage (PR [#3924](https://github.com/vatesfr/xen-orchestra/pull/3924))
- [SAML] Make `AssertionConsumerServiceURL` matches the callback URL
- [Backup NG] Correctly delete broken VHD chains [#3875](https://github.com/vatesfr/xen-orchestra/issues/3875) (PR [#3939](https://github.com/vatesfr/xen-orchestra/pull/3939))
- [Remotes] Don't ignore `mount` options [#3935](https://github.com/vatesfr/xen-orchestra/issues/3935) (PR [#3931](https://github.com/vatesfr/xen-orchestra/pull/3931))

### Released packages

- xen-api v0.24.2
- @xen-orchestra/fs v0.6.1
- xo-server-auth-saml v0.5.3
- xo-server v5.35.0
- xo-web v5.35.0

## **5.31.0** (2019-01-31)

### Enhancements

- [Backup NG] Restore logs moved to restore tab [#3772](https://github.com/vatesfr/xen-orchestra/issues/3772) (PR [#3802](https://github.com/vatesfr/xen-orchestra/pull/3802))
- [Remotes] New SMB implementation that provides better stability and performance [#2257](https://github.com/vatesfr/xen-orchestra/issues/2257) (PR [#3708](https://github.com/vatesfr/xen-orchestra/pull/3708))
- [VM/advanced] ACL management from VM view [#3040](https://github.com/vatesfr/xen-orchestra/issues/3727) (PR [#3040](https://github.com/vatesfr/xen-orchestra/pull/3774))
- [VM/advanced] ACL management from VM view [#3040](https://github.com/vatesfr/xen-orchestra/issues/3040) (PR [#3774](https://github.com/vatesfr/xen-orchestra/pull/3774))
- [VM / snapshots] Ability to save the VM memory [#3795](https://github.com/vatesfr/xen-orchestra/issues/3795) (PR [#3812](https://github.com/vatesfr/xen-orchestra/pull/3812))
- [Backup NG / Health] Show number of lone snapshots in tab label [#3500](https://github.com/vatesfr/xen-orchestra/issues/3500) (PR [#3824](https://github.com/vatesfr/xen-orchestra/pull/3824))
- [Login] Add autofocus on username input on login page [#3835](https://github.com/vatesfr/xen-orchestra/issues/3835) (PR [#3836](https://github.com/vatesfr/xen-orchestra/pull/3836))
- [Home/VM] Bulk snapshot: specify snapshots' names [#3778](https://github.com/vatesfr/xen-orchestra/issues/3778) (PR [#3787](https://github.com/vatesfr/xen-orchestra/pull/3787))
- [Remotes] Show free space and disk usage on remote [#3055](https://github.com/vatesfr/xen-orchestra/issues/3055) (PR [#3767](https://github.com/vatesfr/xen-orchestra/pull/3767))
- [New SR] Add tooltip for reattach action button [#3845](https://github.com/vatesfr/xen-orchestra/issues/3845) (PR [#3852](https://github.com/vatesfr/xen-orchestra/pull/3852))
- [VM migration] Display hosts' free memory [#3264](https://github.com/vatesfr/xen-orchestra/issues/3264) (PR [#3832](https://github.com/vatesfr/xen-orchestra/pull/3832))
- [Plugins] New field to filter displayed plugins (PR [#3832](https://github.com/vatesfr/xen-orchestra/pull/3871))
- Ability to copy ID of "unknown item"s [#3833](https://github.com/vatesfr/xen-orchestra/issues/3833) (PR [#3856](https://github.com/vatesfr/xen-orchestra/pull/3856))
- [Cloud-Init] switch config drive type to `nocloud` to prepare for the passing of network config (PR [#3877](https://github.com/vatesfr/xen-orchestra/pull/3877))
- [UI] Show pool name next to templates' names [#3894](https://github.com/vatesfr/xen-orchestra/issues/3894) (PR [#3896](https://github.com/vatesfr/xen-orchestra/pull/3896))
- [Backup NG] Support zstd compression for full backups [#3773](https://github.com/vatesfr/xen-orchestra/issues/3773) (PR [#3883](https://github.com/vatesfr/xen-orchestra/pull/3883))
- [VM] Ability to copy a VM with zstd compression [#3773](https://github.com/vatesfr/xen-orchestra/issues/3773) (PR [#3889](https://github.com/vatesfr/xen-orchestra/pull/3889))
- [VM & Host] "Pool > Host" breadcrumb at the top of the page (PR [#3898](https://github.com/vatesfr/xen-orchestra/pull/3898))
- [Hosts] Ability to enable/disable host multipathing [#3659](https://github.com/vatesfr/xen-orchestra/issues/3659) (PR [#3865](https://github.com/vatesfr/xen-orchestra/pull/3865))
- [Login] Add OTP authentication [#2044](https://github.com/vatesfr/xen-orchestra/issues/2044) (PR [#3879](https://github.com/vatesfr/xen-orchestra/pull/3879))
- [Notifications] New notification page to provide important information about XOA (PR [#3904](https://github.com/vatesfr/xen-orchestra/pull/3904))
- [VM] Ability to export a VM with zstd compression [#3773](https://github.com/vatesfr/xen-orchestra/issues/3773) (PR [#3891](https://github.com/vatesfr/xen-orchestra/pull/3891))
- [Host/network] Display PIF speed [#3887](https://github.com/vatesfr/xen-orchestra/issues/3887) (PR [#3901](https://github.com/vatesfr/xen-orchestra/pull/3901))
- [SR] Display iscsi paths and mark the SR with a yellow dot if one path is not available. [#3659](https://github.com/vatesfr/xen-orchestra/issues/3659) (PR [#3829](https://github.com/vatesfr/xen-orchestra/pull/3829))
- [UI] Unifies the Signin buttons (PR [#3913](https://github.com/vatesfr/xen-orchestra/pull/3913))
- [Settings/remotes] NFS: display default option on placeholder [#3631](https://github.com/vatesfr/xen-orchestra/issues/3631) (PR [#3921](https://github.com/vatesfr/xen-orchestra/pull/3921))
- [VM/advanced] Ability to pin vCPU to physical cores [#3241](https://github.com/vatesfr/xen-orchestra/issues/3241) (PR [#3254](https://github.com/vatesfr/xen-orchestra/pull/3254))

### Bug fixes
@@ -22,15 +157,28 @@
- [New VM] Fix `NO_HOSTS_AVAILABLE()` error when creating a VM on a local SR from template on another local SR [#3084](https://github.com/vatesfr/xen-orchestra/issues/3084) (PR [#3827](https://github.com/vatesfr/xen-orchestra/pull/3827))
- [Backup NG] Fix typo in the form [#3854](https://github.com/vatesfr/xen-orchestra/issues/3854) (PR [#3855](https://github.com/vatesfr/xen-orchestra/pull/3855))
- [New SR] No warning when creating a NFS SR on a path that is already used as NFS SR [#3844](https://github.com/vatesfr/xen-orchestra/issues/3844) (PR [#3851](https://github.com/vatesfr/xen-orchestra/pull/3851))
- [New SR] No redirection if the SR creation failed or canceled [#3843](https://github.com/vatesfr/xen-orchestra/issues/3843) (PR [#3853](https://github.com/vatesfr/xen-orchestra/pull/3853))
- [Home] Fix two tabs opened by middle click in Firefox [#3450](https://github.com/vatesfr/xen-orchestra/issues/3450) (PR [#3825](https://github.com/vatesfr/xen-orchestra/pull/3825))
- [XOA] Enable downgrade for ending trial (PR [#3867](https://github.com/vatesfr/xen-orchestra/pull/3867))
- [OVA import] allow import of big files [#3468](https://github.com/vatesfr/xen-orchestra/issues/3468) (PR [#3504](https://github.com/vatesfr/xen-orchestra/pull/3504))
- [Backup NG] Smart settings not saved when editing a backup job [#3885](https://github.com/vatesfr/xen-orchestra/issues/3885) (PR [#3886](https://github.com/vatesfr/xen-orchestra/pull/3886))
- [VM/snapshot] New snapshot with memory: fix "invalid parameters" error (PR [#3903](https://github.com/vatesfr/xen-orchestra/pull/3903))
- [VM creation] Broken CloudInit config drive when VM created on local SR
- [Legacy Backup] Fix error when restoring a backup
- [Home] Fix `user.getAll` error when user is not admin [#3573](https://github.com/vatesfr/xen-orchestra/issues/3573) (PR [#3918](https://github.com/vatesfr/xen-orchestra/pull/3918))
- [Backup NG] Fix restore issue when a disk has grown [#3910](https://github.com/vatesfr/xen-orchestra/issues/3910) (PR [#3920](https://github.com/vatesfr/xen-orchestra/pull/3920))
- [Backup NG] Delete _importing_ VMs due to interrupted CR/DR (PR [#3923](https://github.com/vatesfr/xen-orchestra/pull/3923))

### Released packages

- vhd-lib v0.5.0
- vhd-cli v0.2.0
- xen-api v0.24.0
- @xen-orchestra/fs v0.6.0
- xo-server v5.33.0
- xo-web v5.33.0
- vhd-lib v0.5.1
- xoa-updater v0.15.0
- xen-api v0.24.1
- xo-vmdk-to-vhd v0.1.6
- xo-server v5.34.0
- xo-web v5.34.0

## **5.30.0** (2018-12-20)
CHANGELOG.unreleased.md (new file, 10 lines)

@@ -0,0 +1,10 @@
> This file contains all changes that have not been released yet.

### Enhancements

### Bug fixes

### Released packages

- xo-server v5.39.0
- xo-web v5.39.0
@@ -4,10 +4,11 @@

- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] CHANGELOG:
- [ ] `CHANGELOG.unreleased.md`:
  - enhancement/bug fix entry added
  - list of packages to release updated (`${name} v${new version}`)
- [ ] documentation updated
- [ ] **I have tested added/updated features** (and impacted code)

### Process
@@ -33,6 +33,7 @@
* [Disaster recovery](disaster_recovery.md)
* [Smart Backup](smart_backup.md)
* [File level Restore](file_level_restore.md)
* [Metadata Backup](metadata_backup.md)
* [Backup Concurrency](concurrency.md)
* [Configure backup reports](backup_reports.md)
* [Backup troubleshooting](backup_troubleshooting.md)
@@ -51,6 +52,7 @@
* [Job manager](scheduler.md)
* [Alerts](alerts.md)
* [Load balancing](load_balancing.md)
* [Emergency Shutdown](emergency_shutdown.md)
* [Auto scalability](auto_scalability.md)
* [Forecaster](forecaster.md)
* [Recipes](recipes.md)
BIN docs/assets/cloud-init-1.png (new file, 5.8 KiB)
BIN docs/assets/cloud-init-2.png (new file, 11 KiB)
BIN docs/assets/cloud-init-3.png (new file, 8.4 KiB)
BIN docs/assets/cloud-init-4.png (new file, 13 KiB)
BIN docs/assets/cr-seed-1.png (new file, 12 KiB)
BIN docs/assets/cr-seed-2.png (new file, 14 KiB)
BIN docs/assets/cr-seed-3.png (new file, 15 KiB)
BIN docs/assets/cr-seed-4.png (new file, 17 KiB)
BIN docs/assets/e-shutdown-1.png (new file, 2.8 KiB)
BIN docs/assets/e-shutdown-2.png (new file, 3.5 KiB)
BIN docs/assets/e-shutdown-3.png (new file, 3.5 KiB)
@@ -12,7 +12,9 @@ Another good way to check if there is activity is the XOA VM stats view (on the

### VDI chain protection

This means your previous VM disks and snapshots should be "merged" (*coalesced* in the XenServer world) before we can take a new snapshot. This mechanism is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. Otherwise, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product dealing with this.
Backup jobs regularly delete snapshots. When a snapshot is deleted, either manually or via a backup job, it triggers the need for Xenserver to coalesce the VDI chain - to merge the remaining VDIs and base copies in the chain. This means generally we cannot take too many new snapshots on said VM until Xenserver has finished running a coalesce job on the VDI chain.

This mechanism and scheduling is handled by XenServer itself, not Xen Orchestra. But we can check your existing VDI chain and avoid creating more snapshots than your storage can merge. If we don't, this will lead to catastrophic consequences. Xen Orchestra is the **only** XenServer/XCP backup product that takes this into account and offers protection.

Without this detection, you could have 2 potential issues:

@@ -21,9 +23,9 @@ Without this detection, you could have 2 potential issues:

The first issue is a chain that contains more than 30 elements (fixed XenServer limit), and the other one means it's full because the "coalesce" process couldn't keep up the pace and the storage filled up.

In the end, this message is a **protection mechanism against damaging your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.
In the end, this message is a **protection mechanism preventing damage to your SR**. The backup job will fail, but XenServer itself should eventually automatically coalesce the snapshot chain, and the the next time the backup job should complete.

Just remember this: **coalesce will happen every time a snapshot is removed**.
Just remember this: **a coalesce should happen every time a snapshot is removed**.

> You can read more on this on our dedicated blog post regarding [XenServer coalesce detection](https://xen-orchestra.com/blog/xenserver-coalesce-detection-in-xen-orchestra/).

@@ -33,11 +35,13 @@ Just remember this: **coalesce will happen every time a snapshot is removed**.

First check SMlog on the XenServer host for messages relating to VDI corruption or coalesce job failure. For example, by running `cat /var/log/SMlog | grep -i exception` or `cat /var/log/SMlog | grep -i error` on the XenServer host with the affected storage.

Coalesce jobs can also fail to run if the SR does not have enough free space. Check the problematic SR and make sure it has enough free space, generally 30% or more free is recommended depending on VM size.
Coalesce jobs can also fail to run if the SR does not have enough free space. Check the problematic SR and make sure it has enough free space, generally 30% or more free is recommended depending on VM size. You can check if this is the issue by searching `SMlog` with `grep -i coales /var/log/SMlog` (you may have to look at previous logs such as `SMlog.1`).

You can check if a coalesce job is currently active by running `ps axf | grep vhd` on the XenServer host and looking for a VHD process in the results (one of the resulting processes will be the grep command you just ran, ignore that one).

If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.
If you don't see any running coalesce jobs, and can't find any other reason that XenServer has not started one, you can attempt to make it start a coalesce job by rescanning the SR. This is harmless to try, but will not always result in a coalesce. Visit the problematic SR in the XOA UI, then click the "Rescan All Disks" button towards the top right: it looks like a refresh circle icon. This should begin the coalesce process - if you click the Advanced tab in the SR view, the "disks needing to be coalesced" list should become smaller and smaller.

As a last resort, migrating the VM (more specifically, its disks) to a new storage repository will also force a coalesce and solve this issue. That means migrating a VM to another host (with its own storage) and back will force the VDI chain for that VM to be coalesced, and get rid of the `VDI Chain Protection` message.

### Parse Error
@@ -1,5 +1,7 @@
# Backups

> Watch our [introduction video](https://www.youtube.com/watch?v=FfUqIwT8KzI) (45m) to Backup in Xen Orchestra!

This section is dedicated to all existing methods of rolling back or backing up your VMs in Xen Orchestra.

There are several ways to protect your VMs:
@@ -8,6 +10,7 @@ There are several ways to protect your VMs:
* [Rolling Snapshots](rolling_snapshots.md) [*Starter Edition*]
* [Delta Backups](delta_backups.md) (best of both previous ones) [*Enterprise Edition*]
* [Disaster Recovery](disaster_recovery.md) [*Enterprise Edition*]
* [Metadata Backups](metadata_backup.md) [*Enterprise Edition*]
* [Continuous Replication](continuous_replication.md) [*Premium Edition*]
* [File Level Restore](file_level_restore.md) [*Premium Edition*]

@@ -39,7 +42,7 @@ Each backups' job execution is identified by a `runId`. You can find this `runId`

All backup types rely on snapshots. But what about data consistency? By default, Xen Orchestra will try to take a **quiesced snapshot** every time a snapshot is done (and fall back to normal snapshots if it's not possible).

Snapshots of Windows VMs can be quiesced (especially MS SQL or Exchange services) after you have installed Xen Tools in your VMs. However, [there is an extra step to install the VSS provider on windows](quiesce). A quiesced snapshot means the operating system will be notified and the cache will be flushed to disks. This way, your backups will always be consistent.
Snapshots of Windows VMs can be quiesced (especially MS SQL or Exchange services) after you have installed Xen Tools in your VMs. However, [there is an extra step to install the VSS provider on windows](https://xen-orchestra.com/blog/xenserver-quiesce-snapshots/). A quiesced snapshot means the operating system will be notified and the cache will be flushed to disks. This way, your backups will always be consistent.

To see if you have quiesced snapshots for a VM, just go into its snapshot tab, then the "info" icon means it is a quiesced snapshot:
@@ -1,7 +1,5 @@
# CloudInit

> CloudInit support is available in the 4.11 release and higher

Cloud-init is a program "that handles the early initialization of a cloud instance"[^n]. In other words, you can, on a "cloud-init"-ready template VM, pass a lot of data at first boot:

* setting the hostname
@@ -18,25 +16,27 @@ So it means very easily customizing your VM when you create it from a compatible

You only need to use a template of a VM with CloudInit installed inside it. [Check this blog post to learn how to install CloudInit](https://xen-orchestra.com/blog/centos-cloud-template-for-xenserver/).

**Note:** In XOA 5.31, we changed the cloud-init config drive type from [OpenStack](https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html) to the [NoCloud](https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html) type. This will allow us to pass network configuration to VMs in the future. For 99% of users, including default cloud-init installs, this change will have no effect. However if you have previously modified your cloud-init installation in a VM template to only look for `openstack` drive types (for instance with the `datasource_list` setting in `/etc/cloud/cloud.cfg`) you need to modify it to also look for `nocloud`.

## Usage

First, select your compatible template (CloudInit ready) and name it:




Then, activate the config drive and insert your SSH key. Or you can also use a custom CloudInit configuration:




> CloudInit configuration examples are [available here](http://cloudinit.readthedocs.org/en/latest/topics/examples.html).

You can extend the disk size (**in this case, the template disk was 8 GiB originally**):
You can extend the disk size (**in this case, the template disk was 8 GiB originally**). We'll extend it to 20GiB:




Finally, create the VM:




Now start the VM and SSH to its IP:
@@ -4,42 +4,37 @@ Once Xen Orchestra is installed, you can configure some parameters in the config
|
||||
|
||||
## Configuration
|
||||
|
||||
The configuration file is located at `/etc/xo-server/config.yaml`.
|
||||
|
||||
**WARNING: YAML is very strict with indentation: use spaces, not tabs.**
|
||||
The configuration file is located at `/etc/xo-server/config.toml`.
|
||||
|
||||
### User to run XO-server as
|
||||
|
||||
By default, XO-server runs as 'root'. You can change that by uncommenting these lines and choosing whatever user/group you want:
|
||||
|
||||
```yaml
|
||||
user: 'nobody'
|
||||
group: 'nogroup'
|
||||
```toml
|
||||
user = 'nobody'
|
||||
group = 'nogroup'
|
||||
```
|
||||
|
||||
**Warning!** A non-privileged user:
|
||||
|
||||
* can't bind to a port < 1024
|
||||
* can't mount NFS shares
|
||||
**Warning!** A non-privileged user requires the use of ``sudo`` to mount NFS shares. See [installation from the sources](from_the_sources.md).
|
||||
|
||||
### HTTP listen address and port
|
||||
|
||||
By default, XO-server listens on all addresses (0.0.0.0) and runs on port 80. If you need to, you can change this in the `# Basic HTTP` section:
|
||||
|
||||
```yaml
|
||||
host: '0.0.0.0'
|
||||
port: 80
|
||||
```toml
|
||||
host = '0.0.0.0'
|
||||
port = 80
|
||||
```
|
||||
|
||||
### HTTPS
|
||||
|
||||
XO-server can also run over HTTPS (you can run HTTP and HTTPS at the same time): just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their paths:
|
||||
|
||||
```yaml
|
||||
host: '0.0.0.0'
|
||||
port: 443
|
||||
certificate: './certificate.pem'
|
||||
key: './key.pem'
|
||||
```toml
|
||||
host = '0.0.0.0'
|
||||
port = 443
|
||||
certificate = './certificate.pem'
|
||||
key = './key.pem'
|
||||
```
|
||||
|
||||
> If a chain of certificate authorities is needed, you may bundle them directly in the certificate. Note: the order of certificates matters; your certificate should come first, followed by the certificate of its issuing authority, and so on up to the root.
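For instance, assuming your files are in PEM format (the file names below are placeholders), the bundle can be built by simple concatenation, leaf certificate first:

```
cat your-certificate.pem intermediate-ca.pem root-ca.pem > certificate.pem
```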
|
||||
@@ -60,10 +55,9 @@ This should be written just before the `mount` option, inside the `http:` block.
|
||||
|
||||
You shouldn't have to change this. It's the path where `xo-web` files are served by `xo-server`.
|
||||
|
||||
```yaml
|
||||
mounts:
|
||||
'/':
|
||||
- '../xo-web/dist/'
|
||||
```toml
|
||||
[http.mounts]
|
||||
'/' = '../xo-web/dist/'
|
||||
```
|
||||
|
||||
### Custom certificate authority
|
||||
@@ -87,8 +81,8 @@ Don't forget to reload `systemd` conf and restart `xo-server`:
|
||||
|
||||
By default, XO-server will try to contact the Redis server on `localhost`, port `6379`. But you can define whatever you want:
|
||||
|
||||
```yaml
|
||||
uri: 'tcp://db:password@hostname:port'
|
||||
```toml
|
||||
uri = 'tcp://db:password@hostname:port'
|
||||
```
|
||||
|
||||
### Proxy for XenServer updates and patches
|
||||
@@ -101,12 +95,12 @@ To do that behind a corporate proxy, just add the `httpProxy` variable to match
|
||||
|
||||
You can add this at the end of your config file:
|
||||
|
||||
```yaml
|
||||
```toml
|
||||
# HTTP proxy configuration used by xo-server to fetch resources on the Internet.
|
||||
#
|
||||
# See: https://github.com/TooTallNate/node-proxy-agent#maps-proxy-protocols-to-httpagent-implementations
|
||||
|
||||
httpProxy: 'http://username:password@proxyAddress:port'
|
||||
httpProxy = 'http://username:password@proxyAddress:port'
|
||||
```
|
||||
|
||||
### Log file
|
||||
|
||||
@@ -43,11 +43,19 @@ To protect the replication, we removed the possibility to boot your copied VM di
|
||||
|
||||
### Job creation
|
||||
|
||||
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, note its identifiers, the main `backupJobId` and the ID of one on the schedules for the job, `backupScheduleId`.
|
||||
Create the Continuous Replication backup job, and leave it disabled for now. On the main Backup-NG page, copy the job's `backupJobId` by hovering to the left of the shortened ID and clicking the copy to clipboard button:
|
||||
|
||||

|
||||
|
||||
Copy it somewhere temporarily. Now we need to also copy the ID of the job schedule, `backupScheduleId`. Do this by hovering over the schedule name in the same panel as before, and clicking the copy to clipboard button. Keep it with the `backupJobId` you copied previously as we will need them all later:
|
||||
|
||||

|
||||
|
||||
### Seed creation
|
||||
|
||||
Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUuid` from the snapshot panel for the VM.
|
||||
Manually create a snapshot on the VM being backed up, then copy this snapshot UUID, `snapshotUuid` from the snapshot panel of the VM:
|
||||
|
||||

|
||||
|
||||
> DO NOT ever delete or alter this snapshot; feel free to rename it to make that clear.
|
||||
|
||||
@@ -55,7 +63,9 @@ Manually create a snapshot on the VM to backup, and note its UUID as `snapshotUu
|
||||
|
||||
Export this snapshot to a file, then import it on the target SR.
|
||||
|
||||
Note the UUID of this newly created VM as `targetVmUuid`.
|
||||
We need to copy the UUID of this newly created VM as well, `targetVmUuid`:
|
||||
|
||||

|
||||
|
||||
> DO NOT start this VM or it will break the Continuous Replication job! You can rename this VM to make that easier to remember.
|
||||
|
||||
@@ -66,7 +76,7 @@ The XOA backup system requires metadata to correctly associate the source snapsh
|
||||
First install the tool (all the following is done from the XOA VM CLI):
|
||||
|
||||
```
|
||||
npm i -g xo-cr-seed
|
||||
sudo npm i -g --unsafe-perm @xen-orchestra/cr-seed-cli
|
||||
```
|
||||
|
||||
Here is an example of how the utility expects the UUIDs and info passed to it:
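A hedged sketch of the expected invocation (the argument order below is an assumption based on the tool's documented usage, so double-check it against the usage output of the installed tool before running it):

```
xo-cr-seed <source XAPI URL> <source snapshot UUID> <target XAPI URL> <target VM UUID> <backupJobId> <backupScheduleId>
```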
|
||||
|
||||
@@ -24,10 +24,11 @@ to create a [GitHub pull request](https://help.github.com/articles/using-pull-re
|
||||
|
||||
|
||||
1. Create a branch for your work
|
||||
2. Create a pull request for this branch against the `master` branch
|
||||
3. Push into the branch until the pull request is ready to merge
|
||||
4. Avoid unnecessary merges: keep you branch up to date by regularly rebasing `git rebase origin/master`
|
||||
5. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
|
||||
2. Add a summary of your changes to `CHANGELOG.md` under the `next` section, if your changes do not relate to an existing changelog item
|
||||
3. Create a pull request for this branch against the `master` branch
|
||||
4. Push into the branch until the pull request is ready to merge
|
||||
5. Avoid unnecessary merges: keep your branch up to date by regularly rebasing `git rebase origin/master`
|
||||
6. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
|
||||
|
||||
### Issue triage
|
||||
|
||||
|
||||
27
docs/emergency_shutdown.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# Emergency Shutdown
|
||||
|
||||
If you have a UPS for your hosts, and lose power, you may have a limited amount of time to shut down all of your VM infrastructure before the batteries run out. If you find yourself in this situation, or any other situation requiring the fast shutdown of everything, you can use the **Emergency Shutdown** feature.
|
||||
|
||||
## How to activate
|
||||
On the host view, clicking on this button will trigger the _Emergency Shutdown_ procedure:
|
||||
|
||||

|
||||
|
||||
1. **All running VMs will be suspended** (think of it like "hibernate" on your laptop: the RAM will be stored in the storage repository).
|
||||
2. Only after this is complete, the host will be halted.
|
||||
|
||||
Here, you can see the running VMs are being suspended:
|
||||
|
||||

|
||||
|
||||
And finally, that's it. They are cleanly shut down with the RAM saved to disk to be resumed later:
|
||||
|
||||

|
||||
|
||||
Now the host is halted automatically.
|
||||
|
||||
## Powering back on
|
||||
When the power outage is over, all you need to do is:
|
||||
|
||||
1. Start your host.
|
||||
2. Resume your VMs: the RAM was preserved, so they will be in the exact same state as they were before the power outage.
|
||||
@@ -6,9 +6,9 @@
|
||||
|
||||
> Please take time to read this guide carefully.
|
||||
|
||||
This installation has been validated against a fresh Debian 8 (Jessie) x64 install. It should be nearly the same on other dpkg systems. For RPM based OS's, it should be close, as most of our dependencies come from NPM and not the OS itself.
|
||||
This installation has been validated against a fresh Debian 9 (Stretch) x64 install. It should be nearly the same on other dpkg systems. For RPM based OS's, it should be close, as most of our dependencies come from NPM and not the OS itself.
|
||||
|
||||
As you may have seen,in other parts of the documentation, XO is composed of two parts: [xo-server](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server/) and [xo-web](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-web/). They can be installed separately, even on different machines, but for the sake of simplicity we will set them up together.
|
||||
As you may have seen in other parts of the documentation, XO is composed of two parts: [xo-server](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server/) and [xo-web](https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-web/). They can be installed separately, even on different machines, but for the sake of simplicity we will set them up together.
|
||||
|
||||
## Packages and Pre-requisites
|
||||
|
||||
@@ -38,24 +38,25 @@ XO needs the following packages to be installed. Redis is used as a database by
|
||||
For example, on Debian:
|
||||
|
||||
```
|
||||
apt-get install build-essential redis-server libpng-dev git python-minimal libvhdi-utils lvm2
|
||||
apt-get install build-essential redis-server libpng-dev git python-minimal libvhdi-utils lvm2 cifs-utils
|
||||
```
|
||||
|
||||
## Fetching the Code
|
||||
|
||||
You need to use the `git` source code manager to fetch the code. Ideally you should run XO as a non-root user, however if you don't run as root you will not be able to mount NFS remotes. As your chosen non-root (or root) user, run the following:
|
||||
You need to use the `git` source code manager to fetch the code. Ideally, you should run XO as a non-root user, and if you choose to, you need to set up `sudo` to be able to mount NFS remotes. As your chosen non-root (or root) user, run the following:
|
||||
|
||||
```
|
||||
git clone -b master http://github.com/vatesfr/xen-orchestra
|
||||
```
|
||||
|
||||
> Note: xo-server and xo-web have been migrated to the [xen-orchestra](https://github.com/vatesfr/xen-orchestra) mono-repository.
|
||||
> Note: xo-server and xo-web have been migrated to the [xen-orchestra](https://github.com/vatesfr/xen-orchestra) mono-repository, so you only need the single clone command above.
|
||||
|
||||
## Installing dependencies
|
||||
|
||||
Once you have it, use `yarn`, as the non-root (or root) user owning the fetched code, to install the other dependencies. Enter the `xen-orchestra` directory and run the following commands:
|
||||
Now that you have the code, you can enter the `xen-orchestra` directory and use `yarn` to install other dependencies. Then finally build it using `yarn build`. Be sure to run `yarn` commands as the same user you will be using to run Xen Orchestra:
|
||||
|
||||
```
|
||||
$ cd xen-orchestra
|
||||
$ yarn
|
||||
$ yarn build
|
||||
```
|
||||
@@ -64,17 +65,15 @@ Now you have to create a config file for `xo-server`:
|
||||
|
||||
```
|
||||
$ cd packages/xo-server
|
||||
$ cp sample.config.yaml .xo-server.yaml
|
||||
$ cp sample.config.toml .xo-server.toml
|
||||
```
|
||||
|
||||
Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory). It's near the end of the file:
|
||||
Edit and uncomment it to have the right path to serve `xo-web`, because `xo-server` embeds an HTTP server (we assume that `xen-orchestra` and `xo-web` are in the same directory):
|
||||
|
||||
```yaml
|
||||
mounts: '/': '../xo-web/dist/'
|
||||
```toml
|
||||
[http.mounts]
|
||||
'/' = '../xo-web/dist/'
|
||||
```
|
||||
> Note that this `dist` folder will be created in the next step.
|
||||
|
||||
**WARNING: YAML is very strict with indentation: use spaces for it, not tabs**.
|
||||
|
||||
In this config file, you can also change default ports (80 and 443) for xo-server. If you are running the server as a non-root user, you will need to set the port to 1024 or higher.
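For example, a minimal sketch using the same `host`/`port` keys shown earlier (`8080` is just an arbitrary unprivileged port):

```toml
host = '0.0.0.0'
port = 8080
```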
|
||||
|
||||
@@ -88,7 +87,7 @@ WebServer listening on localhost:80
|
||||
|
||||
## Running XO
|
||||
|
||||
The only part you need to launch is xo-server which is quite easy to do. From the `xen-orchestra/packages/xo-server` directory, run the following:
|
||||
The only part you need to launch is xo-server, which is quite easy to do. From the `xen-orchestra/packages/xo-server` directory, run the following:
|
||||
|
||||
```
|
||||
$ yarn start
|
||||
@@ -143,9 +142,6 @@ If you need to delete the service:
|
||||
forever-service delete orchestra
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you have problems during the building phase, follow these steps in your `xen-orchestra` directory:
|
||||
@@ -187,3 +183,17 @@ Don't forget to start redis if you don't reboot now:
|
||||
```
|
||||
service redis start
|
||||
```
|
||||
|
||||
## SUDO
|
||||
|
||||
If you are running `xo-server` as a non-root user, you need to use `sudo` to be able to mount NFS remotes. You can do this by editing `xo-server/.xo-server.toml` and setting `useSudo = true`. It's near the end of the file:
|
||||
|
||||
```
|
||||
useSudo = true
|
||||
```
|
||||
|
||||
You need to configure `sudo` to allow the user of your choice to run mount/umount commands without asking for a password. Depending on your operating system / sudo version, the location of this configuration may change. Regardless, you can use:
|
||||
|
||||
```
|
||||
username ALL=(root)NOPASSWD: /bin/mount, /bin/umount
|
||||
```
|
||||
|
||||
31
docs/metadata_backup.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# Metadata backup
|
||||
|
||||
> WARNING: Metadata backup is an experimental feature. Restore is not yet available and some unexpected issues may occur.
|
||||
|
||||
## Introduction
|
||||
|
||||
XCP-ng and Citrix Hypervisor (Xenserver) hosts use a database to store metadata about VMs and their associated resources such as storage and networking. This metadata forms a complete view of all the VMs available on your pool. Backing up the metadata of your pool allows you to recover from a physical hardware failure scenario in which you lose your hosts without losing your storage (SAN, NAS...).
|
||||
|
||||
In Xen Orchestra, Metadata backup is divided into two different options:
|
||||
|
||||
* Pool metadata backup
|
||||
* XO configuration backup
|
||||
|
||||
### How to use metadata backup
|
||||
|
||||
In the backup job section, when creating a new backup job, you will now have a choice between backing up VMs and backing up Metadata.
|
||||

|
||||
|
||||
When you select Metadata backup, you will have a new backup job screen, letting you choose between a pool metadata backup and an XO configuration backup (or both at the same time):
|
||||
|
||||

|
||||
|
||||
Define the name and retention for the job.
|
||||
|
||||

|
||||
|
||||
Once created, the job is displayed with the other classic jobs.
|
||||
|
||||

|
||||
|
||||
> Restore for metadata backup jobs should be available in XO 5.33
|
||||
13
package.json
@@ -4,10 +4,10 @@
|
||||
"@babel/register": "^7.0.0",
|
||||
"babel-core": "^7.0.0-0",
|
||||
"babel-eslint": "^10.0.1",
|
||||
"babel-jest": "^23.0.1",
|
||||
"babel-jest": "^24.1.0",
|
||||
"benchmark": "^2.1.4",
|
||||
"eslint": "^5.1.0",
|
||||
"eslint-config-prettier": "^3.3.0",
|
||||
"eslint-config-prettier": "^4.1.0",
|
||||
"eslint-config-standard": "12.0.0",
|
||||
"eslint-config-standard-jsx": "^6.0.2",
|
||||
"eslint-plugin-import": "^2.8.0",
|
||||
@@ -16,13 +16,13 @@
|
||||
"eslint-plugin-react": "^7.6.1",
|
||||
"eslint-plugin-standard": "^4.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"flow-bin": "^0.89.0",
|
||||
"globby": "^8.0.0",
|
||||
"flow-bin": "^0.95.1",
|
||||
"globby": "^9.0.0",
|
||||
"husky": "^1.2.1",
|
||||
"jest": "^23.0.1",
|
||||
"jest": "^24.1.0",
|
||||
"lodash": "^4.17.4",
|
||||
"prettier": "^1.10.2",
|
||||
"promise-toolbox": "^0.11.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"sorted-object": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
@@ -34,7 +34,6 @@
|
||||
}
|
||||
},
|
||||
"jest": {
|
||||
"timers": "fake",
|
||||
"collectCoverage": true,
|
||||
"projects": [
|
||||
"<rootDir>"
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/complex-matcher",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/complex-matcher",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/value-matcher",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/value-matcher",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
{
|
||||
"name": "vhd-cli",
|
||||
"version": "0.2.0",
|
||||
"version": "0.3.0",
|
||||
"license": "ISC",
|
||||
"description": "",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-cli",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/vhd-cli",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
@@ -26,12 +27,12 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@xen-orchestra/fs": "^0.6.0",
|
||||
"@xen-orchestra/fs": "^0.8.0",
|
||||
"cli-progress": "^2.0.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"getopts": "^2.2.3",
|
||||
"struct-fu": "^1.2.0",
|
||||
"vhd-lib": "^0.5.0"
|
||||
"vhd-lib": "^0.6.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
@@ -41,7 +42,7 @@
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^1.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"promise-toolbox": "^0.11.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"rimraf": "^2.6.1",
|
||||
"tmp": "^0.0.33"
|
||||
},
|
||||
|
||||
33
packages/vhd-cli/src/commands/repl.js
Normal file
@@ -0,0 +1,33 @@
|
||||
import { asCallback, fromCallback, fromEvent } from 'promise-toolbox'
|
||||
import { getHandler } from '@xen-orchestra/fs'
|
||||
import { relative } from 'path'
|
||||
import { start as createRepl } from 'repl'
|
||||
import Vhd, * as vhdLib from 'vhd-lib'
|
||||
|
||||
export default async args => {
|
||||
const cwd = process.cwd()
|
||||
const handler = getHandler({ url: 'file://' + cwd })
|
||||
await handler.sync()
|
||||
try {
|
||||
const repl = createRepl({
|
||||
prompt: 'vhd> ',
|
||||
})
|
||||
Object.assign(repl.context, vhdLib)
|
||||
repl.context.handler = handler
|
||||
repl.context.open = path => new Vhd(handler, relative(cwd, path))
|
||||
|
||||
// Make the REPL wait for promise completion.
|
||||
repl.eval = (evaluate => (cmd, context, filename, cb) => {
|
||||
asCallback.call(
|
||||
fromCallback(cb => {
|
||||
evaluate.call(repl, cmd, context, filename, cb)
|
||||
}).then(value => (Array.isArray(value) ? Promise.all(value) : value)),
|
||||
cb
|
||||
)
|
||||
})(repl.eval)
|
||||
|
||||
await fromEvent(repl, 'exit')
|
||||
} finally {
|
||||
await handler.forget()
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
import execPromise from 'exec-promise'
|
||||
|
||||
import pkg from '../package.json'
|
||||
|
||||
import commands from './commands'
|
||||
|
||||
function runCommand(commands, [command, ...args]) {
|
||||
@@ -16,9 +18,11 @@ function runCommand(commands, [command, ...args]) {
|
||||
return `Usage:
|
||||
|
||||
${Object.keys(commands)
|
||||
.filter(command => command !== 'help')
|
||||
.map(command => ` ${this.command} ${command}`)
|
||||
.join('\n\n')}`
|
||||
.filter(command => command !== 'help')
|
||||
.map(command => ` ${this.command} ${command}`)
|
||||
.join('\n\n')}
|
||||
|
||||
vhd-cli ${pkg.version}`
|
||||
}
|
||||
|
||||
throw `invalid command ${command}` // eslint-disable-line no-throw-literal
|
||||
|
||||
@@ -1,38 +1,40 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import execa from 'execa'
|
||||
import fs from 'fs-extra'
|
||||
import getStream from 'get-stream'
|
||||
import rimraf from 'rimraf'
|
||||
import tmp from 'tmp'
|
||||
import { fromEvent, pFromCallback } from 'promise-toolbox'
|
||||
import { getHandler } from '@xen-orchestra/fs'
|
||||
import { pFromCallback } from 'promise-toolbox'
|
||||
import { pipeline } from 'readable-stream'
|
||||
import { randomBytes } from 'crypto'
|
||||
|
||||
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'
|
||||
|
||||
import { SECTOR_SIZE } from './src/_constants'
|
||||
|
||||
const initialDir = process.cwd()
|
||||
let tempDir = null
|
||||
|
||||
jest.setTimeout(60000)
|
||||
|
||||
beforeEach(async () => {
|
||||
const dir = await pFromCallback(cb => tmp.dir(cb))
|
||||
process.chdir(dir)
|
||||
tempDir = await pFromCallback(cb => tmp.dir(cb))
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
const tmpDir = process.cwd()
|
||||
process.chdir(initialDir)
|
||||
await pFromCallback(cb => rimraf(tmpDir, cb))
|
||||
await pFromCallback(cb => rimraf(tempDir, cb))
|
||||
})
|
||||
|
||||
async function createRandomFile(name, sizeMb) {
|
||||
await execa('bash', [
|
||||
'-c',
|
||||
`< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
|
||||
])
|
||||
async function createRandomFile(name, sizeMB) {
|
||||
const createRandomStream = asyncIteratorToStream(function*(size) {
|
||||
while (size-- > 0) {
|
||||
yield Buffer.from([Math.floor(Math.random() * 256)])
|
||||
}
|
||||
})
|
||||
const input = createRandomStream(sizeMB * 1024 * 1024)
|
||||
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
|
||||
}
|
||||
|
||||
async function checkFile(vhdName) {
|
||||
@@ -53,31 +55,35 @@ async function convertFromRawToVhd(rawName, vhdName) {
|
||||
|
||||
test('blocks can be moved', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'randomfile.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
await createRandomFile(rawFileName, initalSize)
|
||||
const vhdFileName = `${tempDir}/randomfile.vhd`
|
||||
await convertFromRawToVhd(rawFileName, vhdFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler.getSize(rawFileName)
|
||||
const newVhd = new Vhd(handler, vhdFileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd._freeFirstBlockSpace(8000000)
|
||||
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
await fs.readFile('randomfile')
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(
|
||||
await fs.readFile(rawFileName)
|
||||
)
|
||||
})
|
||||
|
||||
test('the BAT MSB is not used for sign', async () => {
|
||||
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const vhd = new Vhd(handler, 'empty.vhd')
|
||||
const emptyFileName = `${tempDir}/empty.vhd`
|
||||
await execa('qemu-img', ['create', '-fvpc', emptyFileName, '1.8T'])
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const vhd = new Vhd(handler, emptyFileName)
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockAllocationTable()
|
||||
// we want the bit 31 to be on, to prove it's not been used for sign
|
||||
const hugeWritePositionSectors = Math.pow(2, 31) + 200
|
||||
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
|
||||
await checkFile('empty.vhd')
|
||||
await checkFile(emptyFileName)
|
||||
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
|
||||
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
|
||||
await vhd._freeFirstBlockSpace(hugePositionBytes)
|
||||
@@ -85,9 +91,10 @@ test('the BAT MSB is not used for sign', async () => {
|
||||
// we recover the data manually for speed reasons.
|
||||
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
|
||||
// hole before the block of data
|
||||
const recoveredFile = await fs.open('recovered', 'w')
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
const recoveredFile = await fs.open(recoveredFileName, 'w')
|
||||
try {
|
||||
const vhd2 = new Vhd(handler, 'empty.vhd')
|
||||
const vhd2 = new Vhd(handler, emptyFileName)
|
||||
await vhd2.readHeaderAndFooter()
|
||||
await vhd2.readBlockAllocationTable()
|
||||
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
|
||||
@@ -107,7 +114,7 @@ test('the BAT MSB is not used for sign', async () => {
|
||||
fs.close(recoveredFile)
|
||||
}
|
||||
const recovered = await getStream.buffer(
|
||||
await fs.createReadStream('recovered', {
|
||||
await fs.createReadStream(recoveredFileName, {
|
||||
start: hugePositionBytes,
|
||||
end: hugePositionBytes + randomBuffer.length - 1,
|
||||
})
|
||||
@@ -117,27 +124,33 @@ test('the BAT MSB is not used for sign', async () => {
|
||||
|
||||
test('writeData on empty file', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const emptyFileName = `${tempDir}/empty.vhd`
|
||||
await createRandomFile(rawFileName, mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile(rawFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler.getSize(rawFileName)
|
||||
const newVhd = new Vhd(handler, emptyFileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.writeData(0, randomData)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('writeData in 2 non-overlaping operations', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const emptyFileName = `${tempDir}/empty.vhd`
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await createRandomFile(rawFileName, mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile(rawFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler.getSize(rawFileName)
|
||||
const newVhd = new Vhd(handler, emptyFileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
const splitPointSectors = 2
|
||||
@@ -146,18 +159,21 @@ test('writeData in 2 non-overlaping operations', async () => {
|
||||
splitPointSectors,
|
||||
randomData.slice(splitPointSectors * 512)
|
||||
)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('writeData in 2 overlaping operations', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const emptyFileName = `${tempDir}/empty.vhd`
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await createRandomFile(rawFileName, mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', emptyFileName, mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile(rawFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler.getSize(rawFileName)
|
||||
const newVhd = new Vhd(handler, emptyFileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
const endFirstWrite = 3
|
||||
@@ -167,119 +183,138 @@ test('writeData in 2 overlaping operations', async () => {
|
||||
startSecondWrite,
|
||||
randomData.slice(startSecondWrite * 512)
|
||||
)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('BAT can be extended and blocks moved', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'randomfile.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
const vhdFileName = `${tempDir}/randomfile.vhd`
|
||||
await createRandomFile(rawFileName, initalSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler.getSize(rawFileName)
|
||||
const newVhd = new Vhd(handler, vhdFileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.ensureBatSize(2000)
|
||||
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
await fs.readFile('randomfile')
|
||||
await recoverRawContent(vhdFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(
|
||||
await fs.readFile(rawFileName)
|
||||
)
|
||||
})
|
||||
|
||||
test('coalesce works with empty parent files', async () => {
|
||||
const mbOfRandom = 2
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const emptyFileName = `${tempDir}/empty.vhd`
|
||||
const vhdFileName = `${tempDir}/randomfile.vhd`
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await createRandomFile(rawFileName, mbOfRandom)
|
||||
await convertFromRawToVhd(rawFileName, vhdFileName)
|
||||
await execa('qemu-img', [
|
||||
'create',
|
||||
'-fvpc',
|
||||
'empty.vhd',
|
||||
emptyFileName,
|
||||
mbOfRandom + 1 + 'M',
|
||||
])
|
||||
await checkFile('randomfile.vhd')
|
||||
await checkFile('empty.vhd')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler._getSize('randomfile')
|
||||
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
|
||||
await checkFile('randomfile.vhd')
|
||||
await checkFile('empty.vhd')
|
||||
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
await fs.readFile('randomfile')
|
||||
await checkFile(vhdFileName)
|
||||
await checkFile(emptyFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const originalSize = await handler._getSize(rawFileName)
|
||||
await chainVhd(handler, emptyFileName, handler, vhdFileName, true)
|
||||
await checkFile(vhdFileName)
|
||||
await checkFile(emptyFileName)
|
||||
await vhdMerge(handler, emptyFileName, handler, vhdFileName)
|
||||
await recoverRawContent(emptyFileName, recoveredFileName, originalSize)
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(
|
||||
await fs.readFile(rawFileName)
|
||||
)
|
||||
})
|
||||
|
||||
test('coalesce works in normal cases', async () => {
|
||||
const mbOfRandom = 5
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
|
||||
const randomFileName = `${tempDir}/randomfile`
|
||||
const random2FileName = `${tempDir}/randomfile2`
|
||||
const smallRandomFileName = `${tempDir}/small_randomfile`
|
||||
const parentFileName = `${tempDir}/parent.vhd`
|
||||
const child1FileName = `${tempDir}/child1.vhd`
|
||||
const child2FileName = `${tempDir}/child2.vhd`
|
||||
const recoveredFileName = `${tempDir}/recovered`
|
||||
await createRandomFile(randomFileName, mbOfRandom)
|
||||
await createRandomFile(smallRandomFileName, Math.ceil(mbOfRandom / 2))
|
||||
await execa('qemu-img', [
|
||||
'create',
|
||||
'-fvpc',
|
||||
'parent.vhd',
|
||||
parentFileName,
|
||||
mbOfRandom + 1 + 'M',
|
||||
])
|
||||
await convertFromRawToVhd('randomfile', 'child1.vhd')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
|
||||
const vhd = new Vhd(handler, 'child2.vhd')
|
||||
await convertFromRawToVhd(randomFileName, child1FileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
await execa('vhd-util', [
|
||||
'snapshot',
|
||||
'-n',
|
||||
child2FileName,
|
||||
'-p',
|
||||
child1FileName,
|
||||
])
|
||||
const vhd = new Vhd(handler, child2FileName)
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockAllocationTable()
|
||||
vhd.footer.creatorApplication = 'xoa'
|
||||
await vhd.writeFooter()
|
||||
|
||||
const originalSize = await handler._getSize('randomfile')
|
||||
await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
|
||||
await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
|
||||
await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
|
||||
await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
|
||||
const smallRandom = await fs.readFile('small_randomfile')
|
||||
const newVhd = new Vhd(handler, 'child2.vhd')
|
||||
const originalSize = await handler._getSize(randomFileName)
|
||||
await chainVhd(handler, parentFileName, handler, child1FileName, true)
|
||||
await execa('vhd-util', ['check', '-t', '-n', child1FileName])
|
||||
await chainVhd(handler, child1FileName, handler, child2FileName, true)
|
||||
await execa('vhd-util', ['check', '-t', '-n', child2FileName])
|
||||
const smallRandom = await fs.readFile(smallRandomFileName)
|
||||
const newVhd = new Vhd(handler, child2FileName)
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockAllocationTable()
|
||||
await newVhd.writeData(5, smallRandom)
|
||||
await checkFile('child2.vhd')
|
||||
await checkFile('child1.vhd')
|
||||
await checkFile('parent.vhd')
|
||||
await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
|
||||
await checkFile('parent.vhd')
|
||||
await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
|
||||
await checkFile('child2.vhd')
|
||||
await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
|
||||
await checkFile('parent.vhd')
|
||||
await recoverRawContent(
|
||||
'parent.vhd',
|
||||
'recovered_from_coalescing',
|
||||
originalSize
|
||||
)
|
||||
await execa('cp', ['randomfile', 'randomfile2'])
|
||||
const fd = await fs.open('randomfile2', 'r+')
|
||||
await checkFile(child2FileName)
|
||||
await checkFile(child1FileName)
|
||||
await checkFile(parentFileName)
|
||||
await vhdMerge(handler, parentFileName, handler, child1FileName)
|
||||
await checkFile(parentFileName)
|
||||
await chainVhd(handler, parentFileName, handler, child2FileName, true)
|
||||
await checkFile(child2FileName)
|
||||
await vhdMerge(handler, parentFileName, handler, child2FileName)
|
||||
await checkFile(parentFileName)
|
||||
await recoverRawContent(parentFileName, recoveredFileName, originalSize)
|
||||
await execa('cp', [randomFileName, random2FileName])
|
||||
const fd = await fs.open(random2FileName, 'r+')
|
||||
try {
|
||||
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
|
||||
} finally {
|
||||
await fs.close(fd)
|
||||
}
|
||||
expect(await fs.readFile('recovered_from_coalescing')).toEqual(
|
||||
await fs.readFile('randomfile2')
|
||||
expect(await fs.readFile(recoveredFileName)).toEqual(
|
||||
await fs.readFile(random2FileName)
|
||||
)
|
||||
})
|
||||
|
||||
test('createSyntheticStream passes vhd-util check', async () => {
|
||||
test.only('createSyntheticStream passes vhd-util check', async () => {
|
||||
const initalSize = 4
|
||||
const expectedVhdSize = 4197888
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = getHandler({ url: 'file://' + process.cwd() })
|
||||
const stream = await createSyntheticStream(handler, 'randomfile.vhd')
|
||||
expect(stream.length).toEqual(expectedVhdSize)
|
||||
await fromEvent(
|
||||
stream.pipe(await fs.createWriteStream('recovered.vhd')),
|
||||
'finish'
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdFileName = `${tempDir}/randomfile.vhd`
|
||||
const recoveredVhdFileName = `${tempDir}/recovered.vhd`
|
||||
await createRandomFile(rawFileName, initalSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdFileName)
|
||||
await checkFile(vhdFileName)
|
||||
const handler = getHandler({ url: 'file://' })
|
||||
const stream = await createSyntheticStream(handler, vhdFileName)
|
||||
const expectedVhdSize = (await fs.stat(vhdFileName)).size
|
||||
expect(stream.length).toEqual((await fs.stat(vhdFileName)).size)
|
||||
await pFromCallback(cb =>
|
||||
pipeline(stream, fs.createWriteStream(recoveredVhdFileName), cb)
|
||||
)
|
||||
await checkFile('recovered.vhd')
|
||||
const stats = await fs.stat('recovered.vhd')
|
||||
await checkFile(recoveredVhdFileName)
|
||||
const stats = await fs.stat(recoveredVhdFileName)
|
||||
expect(stats.size).toEqual(expectedVhdSize)
|
||||
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
|
||||
await execa('qemu-img', ['compare', recoveredVhdFileName, rawFileName])
|
||||
})
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
{
|
||||
"name": "vhd-lib",
|
||||
"version": "0.5.0",
|
||||
"version": "0.6.0",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Primitives for VHD file handling",
|
||||
"keywords": [],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "packages/vhd-lib",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
@@ -21,11 +22,11 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"async-iterator-to-stream": "^1.0.2",
|
||||
"core-js": "3.0.0-beta.3",
|
||||
"core-js": "3.0.0",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"promise-toolbox": "^0.11.0",
|
||||
"promise-toolbox": "^0.12.1",
|
||||
"struct-fu": "^1.2.0",
|
||||
"uuid": "^3.0.1"
|
||||
},
|
||||
@@ -34,13 +35,14 @@
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"@xen-orchestra/fs": "^0.6.0",
|
||||
"@xen-orchestra/fs": "^0.8.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"execa": "^1.0.0",
|
||||
"fs-promise": "^2.0.0",
|
||||
"get-stream": "^4.0.0",
|
||||
"index-modules": "^0.3.0",
|
||||
"readable-stream": "^3.0.6",
|
||||
"rimraf": "^2.6.2",
|
||||
"tmp": "^0.0.33"
|
||||
},
|
||||
|
||||
20
packages/vhd-lib/src/_checkFooter.js
Normal file
@@ -0,0 +1,20 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import {
|
||||
DISK_TYPE_DIFFERENCING,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FILE_FORMAT_VERSION,
|
||||
FOOTER_COOKIE,
|
||||
FOOTER_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
export default footer => {
|
||||
assert.strictEqual(footer.cookie, FOOTER_COOKIE)
|
||||
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
|
||||
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
|
||||
assert(footer.originalSize <= footer.currentSize)
|
||||
assert(
|
||||
footer.diskType === DISK_TYPE_DIFFERENCING ||
|
||||
footer.diskType === DISK_TYPE_DYNAMIC
|
||||
)
|
||||
}
|
||||
14
packages/vhd-lib/src/_checkHeader.js
Normal file
@@ -0,0 +1,14 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import { HEADER_COOKIE, HEADER_VERSION, SECTOR_SIZE } from './_constants'
|
||||
|
||||
export default (header, footer) => {
|
||||
assert.strictEqual(header.cookie, HEADER_COOKIE)
|
||||
assert.strictEqual(header.dataOffset, undefined)
|
||||
assert.strictEqual(header.headerVersion, HEADER_VERSION)
|
||||
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
|
||||
|
||||
if (footer !== undefined) {
|
||||
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
|
||||
}
|
||||
}
|
||||
47
packages/vhd-lib/src/_getFirstAndLastBlocks.js
Normal file
@@ -0,0 +1,47 @@
|
||||
import assert from 'assert'
|
||||
|
||||
import { BLOCK_UNUSED } from './_constants'
|
||||
|
||||
// get the identifiers and first sectors of the first and last block
|
||||
// in the file
|
||||
export default bat => {
|
||||
const n = bat.length
|
||||
assert.notStrictEqual(n, 0)
|
||||
assert.strictEqual(n % 4, 0)
|
||||
|
||||
let i = 0
|
||||
let j = 0
|
||||
let first, firstSector, last, lastSector
|
||||
|
||||
// get first allocated block for initialization
|
||||
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
|
||||
i += 1
|
||||
j += 4
|
||||
|
||||
if (j === n) {
|
||||
const error = new Error('no allocated block found')
|
||||
error.noBlock = true
|
||||
throw error
|
||||
}
|
||||
}
|
||||
lastSector = firstSector
|
||||
first = last = i
|
||||
|
||||
while (j < n) {
|
||||
const sector = bat.readUInt32BE(j)
|
||||
if (sector !== BLOCK_UNUSED) {
|
||||
if (sector < firstSector) {
|
||||
first = i
|
||||
firstSector = sector
|
||||
} else if (sector > lastSector) {
|
||||
last = i
|
||||
lastSector = sector
|
||||
}
|
||||
}
|
||||
|
||||
i += 1
|
||||
j += 4
|
||||
}
|
||||
|
||||
return { first, firstSector, last, lastSector }
|
||||
}
|
||||
50
packages/vhd-lib/src/_readChunk.js
Normal file
@@ -0,0 +1,50 @@
|
||||
export default async function readChunk(stream, n) {
|
||||
if (n === 0) {
|
||||
return Buffer.alloc(0)
|
||||
}
|
||||
return new Promise((resolve, reject) => {
|
||||
const chunks = []
|
||||
let i = 0
|
||||
|
||||
function clean() {
|
||||
stream.removeListener('readable', onReadable)
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
}
|
||||
|
||||
function resolve2() {
|
||||
clean()
|
||||
resolve(Buffer.concat(chunks, i))
|
||||
}
|
||||
|
||||
function onEnd() {
|
||||
resolve2()
|
||||
clean()
|
||||
}
|
||||
|
||||
function onError(error) {
|
||||
reject(error)
|
||||
clean()
|
||||
}
|
||||
|
||||
function onReadable() {
|
||||
const chunk = stream.read(n - i)
|
||||
if (chunk === null) {
|
||||
return // wait for more data
|
||||
}
|
||||
i += chunk.length
|
||||
chunks.push(chunk)
|
||||
if (i >= n) {
|
||||
resolve2()
|
||||
}
|
||||
}
|
||||
|
||||
stream.on('end', onEnd)
|
||||
stream.on('error', onError)
|
||||
stream.on('readable', onReadable)
|
||||
|
||||
if (stream.readable) {
|
||||
onReadable()
|
||||
}
|
||||
})
|
||||
}
|
||||
93
packages/vhd-lib/src/createVhdStreamWithLength.integ.spec.js
Normal file
@@ -0,0 +1,93 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import execa from 'execa'
|
||||
import fs from 'fs-extra'
|
||||
import rimraf from 'rimraf'
|
||||
import getStream from 'get-stream'
|
||||
import tmp from 'tmp'
|
||||
import { createReadStream, createWriteStream } from 'fs'
|
||||
import { pFromCallback } from 'promise-toolbox'
|
||||
import { pipeline } from 'readable-stream'
|
||||
|
||||
import { createVhdStreamWithLength } from '.'
|
||||
import { FOOTER_SIZE } from './_constants'
|
||||
|
||||
let tempDir = null
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await pFromCallback(cb => tmp.dir(cb))
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await pFromCallback(cb => rimraf(tempDir, cb))
|
||||
})
|
||||
|
||||
async function convertFromRawToVhd(rawName, vhdName) {
|
||||
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
|
||||
}
|
||||
|
||||
async function createRandomFile(name, size) {
|
||||
const createRandomStream = asyncIteratorToStream(function*(size) {
|
||||
while (size-- > 0) {
|
||||
yield Buffer.from([Math.floor(Math.random() * 256)])
|
||||
}
|
||||
})
|
||||
const input = await createRandomStream(size)
|
||||
await pFromCallback(cb => pipeline(input, fs.createWriteStream(name), cb))
|
||||
}
|
||||
|
||||
test('createVhdStreamWithLength can extract length', async () => {
|
||||
const initialSize = 4 * 1024
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdName = `${tempDir}/randomfile.vhd`
|
||||
const outputVhdName = `${tempDir}/output.vhd`
|
||||
await createRandomFile(rawFileName, initialSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdName)
|
||||
const vhdSize = fs.statSync(vhdName).size
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(vhdName)
|
||||
)
|
||||
expect(result.length).toEqual(vhdSize)
|
||||
const outputFileStream = await createWriteStream(outputVhdName)
|
||||
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
|
||||
const outputSize = fs.statSync(outputVhdName).size
|
||||
expect(outputSize).toEqual(vhdSize)
|
||||
})
|
||||
|
||||
test('createVhdStreamWithLength can skip blank after last block and before footer', async () => {
|
||||
const initialSize = 4 * 1024
|
||||
const rawFileName = `${tempDir}/randomfile`
|
||||
const vhdName = `${tempDir}/randomfile.vhd`
|
||||
const outputVhdName = `${tempDir}/output.vhd`
|
||||
await createRandomFile(rawFileName, initialSize)
|
||||
await convertFromRawToVhd(rawFileName, vhdName)
|
||||
const vhdSize = fs.statSync(vhdName).size
|
||||
// read file footer
|
||||
const footer = await getStream.buffer(
|
||||
createReadStream(vhdName, { start: vhdSize - FOOTER_SIZE })
|
||||
)
|
||||
|
||||
// we'll override the footer
|
||||
const endOfFile = await createWriteStream(vhdName, {
|
||||
flags: 'r+',
|
||||
start: vhdSize - FOOTER_SIZE,
|
||||
})
|
||||
// write a blank over the previous footer
|
||||
await pFromCallback(cb => endOfFile.write(Buffer.alloc(FOOTER_SIZE), cb))
|
||||
// write the footer after the new blank
|
||||
await pFromCallback(cb => endOfFile.end(footer, cb))
|
||||
const longerSize = fs.statSync(vhdName).size
|
||||
// check input file has been lengthened
|
||||
expect(longerSize).toEqual(vhdSize + FOOTER_SIZE)
|
||||
const result = await createVhdStreamWithLength(
|
||||
await createReadStream(vhdName)
|
||||
)
|
||||
expect(result.length).toEqual(vhdSize)
|
||||
const outputFileStream = await createWriteStream(outputVhdName)
|
||||
await pFromCallback(cb => pipeline(result, outputFileStream, cb))
|
||||
const outputSize = fs.statSync(outputVhdName).size
|
||||
// check out file has been shortened again
|
||||
expect(outputSize).toEqual(vhdSize)
|
||||
await execa('qemu-img', ['compare', outputVhdName, vhdName])
|
||||
})
|
||||
80
packages/vhd-lib/src/createVhdStreamWithLength.js
Normal file
@@ -0,0 +1,80 @@
|
||||
import assert from 'assert'
|
||||
import { pipeline, Transform } from 'readable-stream'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
import noop from './_noop'
|
||||
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
|
||||
import readChunk from './_readChunk'
|
||||
import { FOOTER_SIZE, HEADER_SIZE, SECTOR_SIZE } from './_constants'
|
||||
import { fuFooter, fuHeader } from './_structs'
|
||||
|
||||
class EndCutterStream extends Transform {
|
||||
constructor(footerOffset, footerBuffer) {
|
||||
super()
|
||||
this._footerOffset = footerOffset
|
||||
this._footerBuffer = footerBuffer
|
||||
this._position = 0
|
||||
this._done = false
|
||||
}
|
||||
|
||||
_transform(data, encoding, callback) {
|
||||
if (!this._done) {
|
||||
if (this._position + data.length >= this._footerOffset) {
|
||||
this._done = true
|
||||
const difference = this._footerOffset - this._position
|
||||
data = data.slice(0, difference)
|
||||
this.push(data)
|
||||
this.push(this._footerBuffer)
|
||||
} else {
|
||||
this.push(data)
|
||||
}
|
||||
this._position += data.length
|
||||
}
|
||||
callback()
|
||||
}
|
||||
}
|
||||
|
||||
export default async function createVhdStreamWithLength(stream) {
|
||||
const readBuffers = []
|
||||
let streamPosition = 0
|
||||
|
||||
async function readStream(length) {
|
||||
const chunk = await readChunk(stream, length)
|
||||
assert.strictEqual(chunk.length, length)
|
||||
streamPosition += chunk.length
|
||||
readBuffers.push(chunk)
|
||||
return chunk
|
||||
}
|
||||
|
||||
const footerBuffer = await readStream(FOOTER_SIZE)
|
||||
const footer = fuFooter.unpack(footerBuffer)
|
||||
checkFooter(footer)
|
||||
|
||||
const header = fuHeader.unpack(await readStream(HEADER_SIZE))
|
||||
checkHeader(header, footer)
|
||||
|
||||
await readStream(header.tableOffset - streamPosition)
|
||||
|
||||
const table = await readStream(header.maxTableEntries * 4)
|
||||
|
||||
readBuffers.reverse()
|
||||
for (const buf of readBuffers) {
|
||||
stream.unshift(buf)
|
||||
}
|
||||
|
||||
const footerOffset =
|
||||
getFirstAndLastBlocks(table).lastSector * SECTOR_SIZE +
|
||||
Math.ceil(header.blockSize / SECTOR_SIZE / 8 / SECTOR_SIZE) * SECTOR_SIZE +
|
||||
header.blockSize
|
||||
|
||||
// ignore any data after footerOffset and push footerBuffer
|
||||
//
|
||||
// this is necessary to ignore any blank space between the last block and the
|
||||
// final footer which would invalidate the size we computed
|
||||
const newStream = new EndCutterStream(footerOffset, footerBuffer)
|
||||
pipeline(stream, newStream, noop)
|
||||
|
||||
newStream.length = footerOffset + FOOTER_SIZE
|
||||
return newStream
|
||||
}
|
||||
@@ -11,3 +11,6 @@ export {
|
||||
} from './createReadableSparseStream'
|
||||
export { default as createSyntheticStream } from './createSyntheticStream'
|
||||
export { default as mergeVhd } from './merge'
|
||||
export {
|
||||
default as createVhdStreamWithLength,
|
||||
} from './createVhdStreamWithLength'
|
||||
|
||||
@@ -1,19 +1,16 @@
|
||||
import assert from 'assert'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import checkFooter from './_checkFooter'
|
||||
import checkHeader from './_checkHeader'
|
||||
import constantStream from './_constant-stream'
|
||||
import getFirstAndLastBlocks from './_getFirstAndLastBlocks'
|
||||
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
|
||||
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DISK_TYPE_DIFFERENCING,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FILE_FORMAT_VERSION,
|
||||
FOOTER_COOKIE,
|
||||
FOOTER_SIZE,
|
||||
HEADER_COOKIE,
|
||||
HEADER_SIZE,
|
||||
HEADER_VERSION,
|
||||
PARENT_LOCATOR_ENTRIES,
|
||||
PLATFORM_NONE,
|
||||
PLATFORM_W2KU,
|
||||
@@ -170,21 +167,10 @@ export default class Vhd {
|
||||
}
|
||||
|
||||
const footer = (this.footer = fuFooter.unpack(bufFooter))
|
||||
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
|
||||
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
|
||||
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
|
||||
assert(footer.originalSize <= footer.currentSize)
|
||||
assert(
|
||||
footer.diskType === DISK_TYPE_DIFFERENCING ||
|
||||
footer.diskType === DISK_TYPE_DYNAMIC
|
||||
)
|
||||
checkFooter(footer)
|
||||
|
||||
const header = (this.header = fuHeader.unpack(bufHeader))
|
||||
assert.strictEqual(header.cookie, HEADER_COOKIE)
|
||||
assert.strictEqual(header.dataOffset, undefined)
|
||||
assert.strictEqual(header.headerVersion, HEADER_VERSION)
|
||||
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
|
||||
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
|
||||
checkHeader(header, footer)
|
||||
|
||||
// Compute the number of sectors in one block.
|
||||
// Default: One block contains 4096 sectors of 512 bytes.
|
||||
@@ -216,7 +202,9 @@ export default class Vhd {
|
||||
|
||||
// return the first sector (bitmap) of a block
|
||||
_getBatEntry(block) {
|
||||
return this.blockTable.readUInt32BE(block * 4)
|
||||
const i = block * 4
|
||||
const { blockTable } = this
|
||||
return i < blockTable.length ? blockTable.readUInt32BE(i) : BLOCK_UNUSED
|
||||
}
|
||||
|
||||
_readBlock(blockId, onlyBitmap = false) {
|
||||
@@ -240,49 +228,6 @@ export default class Vhd {
|
||||
)
|
||||
}
|
||||
|
||||
// get the identifiers and first sectors of the first and last block
|
||||
// in the file
|
||||
//
|
||||
_getFirstAndLastBlocks() {
|
||||
const n = this.header.maxTableEntries
|
||||
const bat = this.blockTable
|
||||
let i = 0
|
||||
let j = 0
|
||||
let first, firstSector, last, lastSector
|
||||
|
||||
// get first allocated block for initialization
|
||||
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
|
||||
i += 1
|
||||
j += 4
|
||||
|
||||
if (i === n) {
|
||||
const error = new Error('no allocated block found')
|
||||
error.noBlock = true
|
||||
throw error
|
||||
}
|
||||
}
|
||||
lastSector = firstSector
|
||||
first = last = i
|
||||
|
||||
while (i < n) {
|
||||
const sector = bat.readUInt32BE(j)
|
||||
if (sector !== BLOCK_UNUSED) {
|
||||
if (sector < firstSector) {
|
||||
first = i
|
||||
firstSector = sector
|
||||
} else if (sector > lastSector) {
|
||||
last = i
|
||||
lastSector = sector
|
||||
}
|
||||
}
|
||||
|
||||
i += 1
|
||||
j += 4
|
||||
}
|
||||
|
||||
return { first, firstSector, last, lastSector }
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
// Write functions.
|
||||
// =================================================================
|
||||
@@ -309,7 +254,9 @@ export default class Vhd {
|
||||
|
||||
async _freeFirstBlockSpace(spaceNeededBytes) {
|
||||
try {
|
||||
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
|
||||
const { first, firstSector, lastSector } = getFirstAndLastBlocks(
|
||||
this.blockTable
|
||||
)
|
||||
const tableOffset = this.header.tableOffset
|
||||
const { batSize } = this
|
||||
const newMinSector = Math.ceil(
|
||||
|
||||
@@ -4,22 +4,20 @@ import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromEvent, pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'

import { createReadableRawStream, createReadableSparseStream } from './'

import { createFooter } from './src/_createFooterHeader'

const initialDir = process.cwd()
let tempDir = null

beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
tempDir = await pFromCallback(cb => tmp.dir(cb))
})

afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
await pFromCallback(cb => rimraf(tempDir, cb))
})

test('createFooter() does not crash', () => {
@@ -55,9 +53,10 @@ test('ReadableRawVHDStream does not crash', async () => {
}
const fileSize = 1000
const stream = createReadableRawStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await pFromCallback(cb =>
pipeline(stream, createWriteStream(`${tempDir}/output.vhd`), cb)
)
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {
@@ -87,9 +86,9 @@ test('ReadableRawVHDStream detects when blocks are out of order', async () => {
new Promise((resolve, reject) => {
const stream = createReadableRawStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
pipeline(stream, createWriteStream(`${tempDir}/outputStream`), err =>
err ? reject(err) : resolve()
)
})
).rejects.toThrow('Received out of order blocks')
})
@@ -114,19 +113,19 @@ test('ReadableSparseVHDStream can handle a sparse file', async () => {
blocks
)
expect(stream.length).toEqual(4197888)
const pipe = stream.pipe(createWriteStream('output.vhd'))
const pipe = stream.pipe(createWriteStream(`${tempDir}/output.vhd`))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('vhd-util', ['check', '-t', '-i', '-n', `${tempDir}/output.vhd`])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
`${tempDir}/output.vhd`,
`${tempDir}/out1.raw`,
])
const out1 = await readFile('out1.raw')
const out1 = await readFile(`${tempDir}/out1.raw`)
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)

@@ -15,6 +15,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xapi-explore-sr",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xapi-explore-sr",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -40,7 +41,7 @@
"human-format": "^0.10.0",
"lodash": "^4.17.4",
"pw": "^0.0.4",
"xen-api": "^0.24.0"
"xen-api": "^0.25.0"
},
"devDependencies": {
"@babel/cli": "^7.1.5",

@@ -95,7 +95,7 @@ root@xen1.company.net> xapi.pool.$master.name_label
To ease searches, `find()` and `findAll()` functions are available:

```
root@xen1.company.net> findAll({ $type: 'vm' }).length
root@xen1.company.net> findAll({ $type: 'VM' }).length
183
```

@@ -6,35 +6,18 @@ const createProgress = require('progress-stream')
const createTop = require('process-top')
const defer = require('golike-defer').default
const getopts = require('getopts')
const humanFormat = require('human-format')
const { CancelToken } = require('promise-toolbox')

const { createClient } = require('../')

const {
createOutputStream,
formatProgress,
pipeline,
resolveRecord,
throttle,
} = require('./utils')

const formatSize = bytes => humanFormat(bytes, { scale: 'binary', unit: 'B' })

function Progress$toString() {
return [
formatSize(this.transferred),
' / ',
formatSize(this.length),
' | ',
this.runtime,
's / ',
this.eta,
's | ',
formatSize(this.speed),
'/s',
].join('')
}

defer(async ($defer, rawArgs) => {
const { raw, throttle: bps, _: args } = getopts(rawArgs, {
boolean: 'raw',
@@ -80,7 +63,7 @@ defer(async ($defer, rawArgs) => {
$defer(
clearInterval,
setInterval(() => {
console.warn('\r %s | %s', top.toString(), Progress$toString.call(progressStream.progress()))
console.warn('\r %s | %s', top.toString(), formatProgress(progressStream.progress()))
}, 1e3)
)

@@ -2,15 +2,25 @@

process.env.DEBUG = '*'

const createProgress = require('progress-stream')
const defer = require('golike-defer').default
const pump = require('pump')
const { CancelToken, fromCallback } = require('promise-toolbox')
const getopts = require('getopts')
const { CancelToken } = require('promise-toolbox')

const { createClient } = require('../')

const { createOutputStream, resolveRef } = require('./utils')
const {
createOutputStream,
formatProgress,
pipeline,
resolveRecord,
} = require('./utils')

defer(async ($defer, rawArgs) => {
const { gzip, zstd, _: args } = getopts(rawArgs, {
boolean: ['gzip', 'zstd'],
})

defer(async ($defer, args) => {
if (args.length < 2) {
return console.log('Usage: export-vm <XS URL> <VM identifier> [<XVA file>]')
}
@@ -18,7 +28,7 @@ defer(async ($defer, args) => {
const xapi = createClient({
allowUnauthorized: true,
url: args[0],
watchEvents: false
watchEvents: false,
})

await xapi.connect()
@@ -30,18 +40,16 @@ defer(async ($defer, args) => {
// https://xapi-project.github.io/xen-api/importexport.html
const exportStream = await xapi.getResource(token, '/export/', {
query: {
ref: await resolveRef(xapi, 'VM', args[1]),
use_compression: 'true'
}
ref: (await resolveRecord(xapi, 'VM', args[1])).$ref,
use_compression: zstd ? 'zstd' : gzip ? 'true' : 'false',
},
})

console.warn('Export task:', exportStream.headers['task-id'])

await fromCallback(cb => pump(
await pipeline(
exportStream,
createOutputStream(args[2]),
cb
))
})(process.argv.slice(2)).catch(
console.error.bind(console, 'error')
)
createProgress({ time: 1e3 }, p => console.warn(formatProgress(p))),
createOutputStream(args[2])
)
})(process.argv.slice(2)).catch(console.error.bind(console, 'error'))

@@ -1,9 +1,10 @@
const { createReadStream, createWriteStream, statSync } = require('fs')
const { fromCallback } = require('promise-toolbox')
const { PassThrough, pipeline } = require('readable-stream')
const humanFormat = require('human-format')
const Throttle = require('throttle')

const { isOpaqueRef } = require('../')
const isOpaqueRef = require('../dist/_isOpaqueRef').default

exports.createInputStream = path => {
if (path === undefined || path === '-') {
@@ -28,6 +29,23 @@ exports.createOutputStream = path => {
return stream
}

const formatSizeOpts = { scale: 'binary', unit: 'B' }
const formatSize = bytes => humanFormat(bytes, formatSizeOpts)

exports.formatProgress = p =>
[
formatSize(p.transferred),
' / ',
formatSize(p.length),
' | ',
p.runtime,
's / ',
p.eta,
's | ',
formatSize(p.speed),
'/s',
].join('')

exports.pipeline = (...streams) => {
return fromCallback(cb => {
streams = streams.filter(_ => _ != null)

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.24.0",
"version": "0.25.0",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [
@@ -13,6 +13,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xen-api",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xen-api",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -36,16 +37,15 @@
"debug": "^4.0.1",
"event-to-promise": "^0.8.0",
"exec-promise": "^0.7.0",
"http-request-plus": "^0.6.0",
"iterable-backoff": "^0.0.0",
"jest-diff": "^23.5.0",
"http-request-plus": "^0.8.0",
"jest-diff": "^24.0.0",
"json-rpc-protocol": "^0.13.1",
"kindof": "^2.0.0",
"lodash": "^4.17.4",
"make-error": "^1.3.0",
"minimist": "^1.2.0",
"ms": "^2.1.1",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pw": "0.0.4",
"xmlrpc": "^1.3.2",
"xo-collection": "^0.4.1"
@@ -53,7 +53,10 @@
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-class-properties": "^7.3.4",
"@babel/plugin-proposal-decorators": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.2.0",
"@babel/plugin-proposal-optional-chaining": "^7.2.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",

30  packages/xen-api/src/_XapiError.js  Normal file
@@ -0,0 +1,30 @@
import { BaseError } from 'make-error'

export default class XapiError extends BaseError {
static wrap(error) {
let code, params
if (Array.isArray(error)) {
// < XenServer 7.3
;[code, ...params] = error
} else {
code = error.message
params = error.data
if (!Array.isArray(params)) {
params = []
}
}
return new XapiError(code, params)
}

constructor(code, params) {
super(`${code}(${params.join(', ')})`)

this.code = code
this.params = params

// slots that can be assigned later
this.call = undefined
this.url = undefined
this.task = undefined
}
}

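As a side note, here is a minimal sketch of how `XapiError.wrap` above behaves for the two error shapes it accepts; the error codes and parameters below are invented placeholders, not real XenServer responses:

```
import XapiError from './_XapiError'

// pre-7.3 hosts report errors as an array: [code, ...params]
const legacy = XapiError.wrap(['SESSION_AUTHENTICATION_FAILED', 'root'])
// legacy.code === 'SESSION_AUTHENTICATION_FAILED', legacy.params => ['root']

// newer hosts report an object with `message` and `data`
const modern = XapiError.wrap({ message: 'HANDLE_INVALID', data: ['VM'] })
// modern.message === 'HANDLE_INVALID(VM)', and modern.call / modern.url /
// modern.task stay undefined until the caller assigns them
```
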
15  packages/xen-api/src/_coalesceCalls.js  Normal file
@@ -0,0 +1,15 @@
// decorates fn so that concurrent calls are coalesced into a single one
export default function coalesceCalls(fn) {
let promise
const clean = () => {
promise = undefined
}
return function() {
if (promise !== undefined) {
return promise
}
promise = fn.apply(this, arguments)
promise.then(clean, clean)
return promise
}
}

26  packages/xen-api/src/_coalesceCalls.spec.js  Normal file
@@ -0,0 +1,26 @@
/* eslint-env jest */

import pDefer from 'promise-toolbox/defer'

import coalesceCalls from './_coalesceCalls'

describe('coalesceCalls', () => {
it('decorates an async function', async () => {
const fn = coalesceCalls(promise => promise)

const defer1 = pDefer()
const promise1 = fn(defer1.promise)
const defer2 = pDefer()
const promise2 = fn(defer2.promise)

defer1.resolve('foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')

const defer3 = pDefer()
const promise3 = fn(defer3.promise)

defer3.resolve('bar')
expect(await promise3).toBe('bar')
})
})

3  packages/xen-api/src/_debug.js  Normal file
@@ -0,0 +1,3 @@
import debug from 'debug'

export default debug('xen-api')

22  packages/xen-api/src/_getTaskResult.js  Normal file
@@ -0,0 +1,22 @@
import { Cancel } from 'promise-toolbox'

import XapiError from './_XapiError'

export default task => {
const { status } = task
if (status === 'cancelled') {
return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
const error = XapiError.wrap(task.error_info)
error.task = task
return Promise.reject(error)
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
return Promise.resolve(task.result)
}
}

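For illustration only, this is what the helper above yields for the three task states it distinguishes; the task records are hand-written stand-ins, not real XAPI objects:

```
import getTaskResult from './_getTaskResult'

getTaskResult({ status: 'pending' }) // undefined: the task is still running
getTaskResult({ status: 'success', result: '' }) // promise resolved with the raw result
getTaskResult({ status: 'failure', error_info: ['HANDLE_INVALID', 'task'] })
  .catch(error => console.warn(error.code, error.task)) // rejected with a wrapped XapiError
```
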
3  packages/xen-api/src/_isGetAllRecordsMethod.js  Normal file
@@ -0,0 +1,3 @@
const SUFFIX = '.get_all_records'

export default method => method.endsWith(SUFFIX)

3  packages/xen-api/src/_isOpaqueRef.js  Normal file
@@ -0,0 +1,3 @@
const PREFIX = 'OpaqueRef:'

export default value => typeof value === 'string' && value.startsWith(PREFIX)

4  packages/xen-api/src/_isReadOnlyCall.js  Normal file
@@ -0,0 +1,4 @@
const RE = /^[^.]+\.get_/

export default (method, args) =>
args.length === 1 && typeof args[0] === 'string' && RE.test(method)

8  packages/xen-api/src/_makeCallSetting.js  Normal file
@@ -0,0 +1,8 @@
export default (setting, defaultValue) =>
setting === undefined
? () => defaultValue
: typeof setting === 'function'
? setting
: typeof setting === 'object'
? method => setting[method] ?? setting['*'] ?? defaultValue
: () => setting

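A small sketch of how the different setting shapes above resolve; the method names and timeout values are made up purely to show the lookup order:

```
import makeCallSetting from './_makeCallSetting'

const callTimeout = makeCallSetting({ 'VM.start': 60e3, '*': 10e3 }, 5e3)
callTimeout('VM.start') // 60000: the exact method entry wins
callTimeout('pool.get_all') // 10000: falls back to the '*' entry

makeCallSetting(undefined, 5e3)('anything') // 5000: the default value
makeCallSetting(30e3, 5e3)('anything') // 30000: a plain value applies to every method
```
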
18  packages/xen-api/src/_parseUrl.js  Normal file
@@ -0,0 +1,18 @@
const URL_RE = /^(?:(https?:)\/*)?(?:([^:]+):([^@]+)@)?([^/]+?)(?::([0-9]+))?\/?$/

export default url => {
const matches = URL_RE.exec(url)
if (matches === null) {
throw new Error('invalid URL: ' + url)
}

const [, protocol = 'https:', username, password, hostname, port] = matches
const parsedUrl = { protocol, hostname, port }
if (username !== undefined) {
parsedUrl.username = decodeURIComponent(username)
}
if (password !== undefined) {
parsedUrl.password = decodeURIComponent(password)
}
return parsedUrl
}

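To make the regular expression above concrete, a hedged example of what it captures; the credentials and host below are placeholders:

```
import parseUrl from './_parseUrl'

parseUrl('https://root:secret@xs1.example.org:8443')
// → { protocol: 'https:', hostname: 'xs1.example.org', port: '8443',
//     username: 'root', password: 'secret' }

parseUrl('xs1.example.org')
// → { protocol: 'https:', hostname: 'xs1.example.org', port: undefined }
```
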
3  packages/xen-api/src/transports/_UnsupportedTransport.js  Normal file
@@ -0,0 +1,3 @@
import makeError from 'make-error'

export default makeError('UnsupportedTransport')

25  packages/xen-api/src/transports/_prepareXmlRpcParams.js  Normal file
@@ -0,0 +1,25 @@
// Prepare values before passing them to the XenAPI:
//
// - cast integers to strings
export default function prepare(param) {
if (Number.isInteger(param)) {
return String(param)
}

if (typeof param !== 'object' || param === null) {
return param
}

if (Array.isArray(param)) {
return param.map(prepare)
}

const values = {}
Object.keys(param).forEach(key => {
const value = param[key]
if (value !== undefined) {
values[key] = prepare(value)
}
})
return values
}

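To make the casting rule above concrete, a small before/after sketch (the payload is an invented example, not a real XAPI call):

```
import prepare from './_prepareXmlRpcParams'

prepare({ memory: 2147483648, tags: ['prod', 42], note: undefined, nested: { order: 1 } })
// → { memory: '2147483648', tags: ['prod', '42'], nested: { order: '1' } }
// integers become strings, undefined entries are dropped,
// and every other value is passed through unchanged
```
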
@@ -1,3 +0,0 @@
import makeError from 'make-error'

export const UnsupportedTransport = makeError('UnsupportedTransport')

@@ -1,7 +1,7 @@
import jsonRpc from './json-rpc'
import UnsupportedTransport from './_UnsupportedTransport'
import xmlRpc from './xml-rpc'
import xmlRpcJson from './xml-rpc-json'
import { UnsupportedTransport } from './_utils'

const factories = [jsonRpc, xmlRpcJson, xmlRpc]
const { length } = factories

@@ -1,8 +1,9 @@
import httpRequestPlus from 'http-request-plus'
import { format, parse } from 'json-rpc-protocol'

import { UnsupportedTransport } from './_utils'
import UnsupportedTransport from './_UnsupportedTransport'

// https://github.com/xenserver/xenadmin/blob/0df39a9d83cd82713f32d24704852a0fd57b8a64/XenModel/XenAPI/Session.cs#L403-L433
export default ({ allowUnauthorized, url }) => {
return (method, args) =>
httpRequestPlus

@@ -1,7 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import { UnsupportedTransport } from './_utils'
import prepareXmlRpcParams from './_prepareXmlRpcParams'
import UnsupportedTransport from './_UnsupportedTransport'

const logError = error => {
if (error.res) {
@@ -71,10 +72,7 @@ const parseResult = result => {
throw new UnsupportedTransport()
}

export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
path: '/json',
@@ -83,5 +81,6 @@ export default ({
})
const call = promisify(client.methodCall, client)

return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

@@ -1,6 +1,8 @@
import { createClient, createSecureClient } from 'xmlrpc'
import { promisify } from 'promise-toolbox'

import prepareXmlRpcParams from './_prepareXmlRpcParams'

const logError = error => {
if (error.res) {
console.error(
@@ -30,10 +32,7 @@ const parseResult = result => {
return result.Value
}

export default ({
allowUnauthorized,
url: { hostname, path, port, protocol },
}) => {
export default ({ allowUnauthorized, url: { hostname, port, protocol } }) => {
const client = (protocol === 'https:' ? createSecureClient : createClient)({
host: hostname,
port,
@@ -41,5 +40,6 @@ export default ({
})
const call = promisify(client.methodCall, client)

return (method, args) => call(method, args).then(parseResult, logError)
return (method, args) =>
call(method, prepareXmlRpcParams(args)).then(parseResult, logError)
}

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-acl-resolver",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-acl-resolver",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -12,6 +12,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -33,7 +34,7 @@
"chalk": "^2.2.0",
"exec-promise": "^0.7.0",
"fs-promise": "^2.0.3",
"http-request-plus": "^0.6.0",
"http-request-plus": "^0.8.0",
"human-format": "^0.10.0",
"l33teral": "^3.0.3",
"lodash": "^4.17.4",
@@ -42,7 +43,7 @@
"nice-pipe": "0.0.0",
"pretty-ms": "^4.0.0",
"progress-stream": "^2.0.0",
"promise-toolbox": "^0.11.0",
"promise-toolbox": "^0.12.1",
"pump": "^3.0.0",
"pw": "^0.0.4",
"strip-indent": "^2.0.0",

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-collection",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-collection",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -1,7 +1,7 @@
import kindOf from 'kindof'
import { BaseError } from 'make-error'
import { EventEmitter } from 'events'
import { forEach } from 'lodash'
import { forOwn } from 'lodash'

import isEmpty from './is-empty'
import isObject from './is-object'
@@ -10,6 +10,7 @@ import isObject from './is-object'

const {
create: createObject,
keys,
prototype: { hasOwnProperty },
} = Object

@@ -63,6 +64,16 @@ export class NoSuchItem extends BaseError {

// -------------------------------------------------------------------

const assertValidKey = key => {
if (!isValidKey(key)) {
throw new InvalidKey(key)
}
}

const isValidKey = key => typeof key === 'number' || typeof key === 'string'

// -------------------------------------------------------------------

export default class Collection extends EventEmitter {
constructor() {
super()
@@ -71,7 +82,7 @@ export default class Collection extends EventEmitter {
this._buffering = 0
this._indexes = createObject(null)
this._indexedItems = createObject(null)
this._items = {} // createObject(null)
this._items = createObject(null)
this._size = 0
}

@@ -113,7 +124,7 @@ export default class Collection extends EventEmitter {
}

clear() {
forEach(this._items, (_, key) => this._remove(key))
keys(this._items).forEach(this._remove, this)
}

remove(keyOrObjectWithId) {
@@ -176,8 +187,7 @@ export default class Collection extends EventEmitter {
return defaultValue
}

// Throws a NoSuchItem.
this._assertHas(key)
throw new NoSuchItem(key)
}

has(key) {
@@ -189,7 +199,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------

createIndex(name, index) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (hasOwnProperty.call(indexes, name)) {
throw new DuplicateIndex(name)
}
@@ -201,7 +211,7 @@ export default class Collection extends EventEmitter {
}

deleteIndex(name) {
const { _indexes: indexes } = this
const indexes = this._indexes
if (!hasOwnProperty.call(indexes, name)) {
throw new NoSuchIndex(name)
}
@@ -218,7 +228,7 @@ export default class Collection extends EventEmitter {
// -----------------------------------------------------------------

*[Symbol.iterator]() {
const { _items: items } = this
const items = this._items

for (const key in items) {
yield [key, items[key]]
@@ -226,7 +236,7 @@ export default class Collection extends EventEmitter {
}

*keys() {
const { _items: items } = this
const items = this._items

for (const key in items) {
yield key
@@ -234,7 +244,7 @@ export default class Collection extends EventEmitter {
}

*values() {
const { _items: items } = this
const items = this._items

for (const key in items) {
yield items[key]
@@ -255,11 +265,11 @@ export default class Collection extends EventEmitter {
}
called = true

if (--this._buffering) {
if (--this._buffering !== 0) {
return
}

const { _buffer: buffer } = this
const buffer = this._buffer

// Due to deduplication there could be nothing in the buffer.
if (isEmpty(buffer)) {
@@ -276,7 +286,7 @@ export default class Collection extends EventEmitter {
data[buffer[key]][key] = this._items[key]
}

forEach(data, (items, action) => {
forOwn(data, (items, action) => {
if (!isEmpty(items)) {
this.emit(action, items)
}
@@ -306,16 +316,6 @@ export default class Collection extends EventEmitter {
}
}

_assertValidKey(key) {
if (!this._isValidKey(key)) {
throw new InvalidKey(key)
}
}

_isValidKey(key) {
return typeof key === 'number' || typeof key === 'string'
}

_remove(key) {
delete this._items[key]
this._size--
@@ -324,17 +324,17 @@ export default class Collection extends EventEmitter {

_resolveItem(keyOrObjectWithId, valueIfKey = undefined) {
if (valueIfKey !== undefined) {
this._assertValidKey(keyOrObjectWithId)
assertValidKey(keyOrObjectWithId)

return [keyOrObjectWithId, valueIfKey]
}

if (this._isValidKey(keyOrObjectWithId)) {
if (isValidKey(keyOrObjectWithId)) {
return [keyOrObjectWithId]
}

const key = this.getKey(keyOrObjectWithId)
this._assertValidKey(key)
assertValidKey(key)

return [key, keyOrObjectWithId]
}
@@ -347,7 +347,7 @@ export default class Collection extends EventEmitter {
}

if (action === ACTION_ADD) {
this._buffer[key] = this._buffer[key] ? ACTION_UPDATE : ACTION_ADD
this._buffer[key] = key in this._buffer ? ACTION_UPDATE : ACTION_ADD
} else if (action === ACTION_REMOVE) {
if (this._buffer[key] === ACTION_ADD) {
delete this._buffer[key]
@@ -356,7 +356,7 @@ export default class Collection extends EventEmitter {
}
} else {
// update
if (!this._buffer[key]) {
if (!(key in this._buffer)) {
this._buffer[key] = ACTION_UPDATE
}
}

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-common",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-common",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -16,6 +16,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-import-servers-csv",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-import-servers-csv",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -42,7 +43,7 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@types/node": "^10.12.2",
"@types/node": "^11.11.4",
"@types/through2": "^2.0.31",
"tslint": "^5.9.1",
"tslint-config-standard": "^8.0.1",

@@ -1,8 +1,3 @@
declare module 'csv-parser' {
function csvParser(opts?: Object): any
export = csvParser
}

declare module 'exec-promise' {
function execPromise(cb: (args: string[]) => any): void
export = execPromise

@@ -11,6 +11,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-lib",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -7,6 +7,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-remote-parser",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-remote-parser",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -9,11 +9,7 @@ same identifier.

## Install

Installation of the [npm package](https://npmjs.org/package/xo-server-auth-github):

```
> npm install --global xo-server-auth-github
```
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage

@@ -12,6 +12,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-github",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-github",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},

@@ -9,11 +9,7 @@ same identifier.

## Install

Installation of the [npm package](https://npmjs.org/package/xo-server-auth-google):

```
> npm install --global xo-server-auth-google
```
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-google",
"version": "0.2.0",
"version": "0.2.1",
"license": "AGPL-3.0",
"description": "Google authentication plugin for XO-Server",
"keywords": [
@@ -15,6 +15,7 @@
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/xo-server-auth-google",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "packages/xo-server-auth-google",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
@@ -32,7 +33,7 @@
"node": ">=6"
},
"dependencies": {
"passport-google-oauth20": "^1.0.0"
"passport-google-oauth20": "^2.0.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

@@ -9,11 +9,7 @@ same identifier.

## Install

Installation of the [npm package](https://npmjs.org/package/xo-server-auth-ldap):

```
> npm install --global xo-server-auth-ldap
```
For installing XO and the plugins from the sources, please take a look at [the documentation](https://xen-orchestra.com/docs/from_the_sources.html).

## Usage
