Compare commits

246 commits: `xo-web-v5.`... `xo-acl-res`...
```diff
@@ -12,9 +12,11 @@ module.exports = {
   parser: 'babel-eslint',
   rules: {
     'comma-dangle': ['error', 'always-multiline'],
     indent: 'off',
     'no-var': 'error',
+    'node/no-extraneous-import': 'error',
+    'node/no-extraneous-require': 'error',
     'prefer-const': 'error',
     'react/jsx-indent': 'off',
   },
 }
```
`.gitignore` (2 changes)

```diff
@@ -8,6 +8,8 @@
 /packages/*/dist/
 /packages/*/node_modules/
 
+/packages/vhd-cli/src/commands/index.js
+
 /packages/xen-api/plot.dat
 
 /packages/xo-server/.xo-server.*
```
`.travis.yml` (12 changes)

```diff
@@ -1,12 +1,18 @@
 language: node_js
 node_js:
-  - stable
+  #- stable # disable for now due to an issue of indirect dep upath with Node 9
   - 8
   - 6
 
 # Use containers.
 # http://docs.travis-ci.com/user/workers/container-based-infrastructure/
 sudo: false
+addons:
+  apt:
+    packages:
+      - qemu-utils
+      - blktap-utils
+      - vmdk-stream-converter
 
 before_install:
   - curl -o- -L https://yarnpkg.com/install.sh | bash
@@ -14,3 +20,7 @@ before_install:
 
 cache:
   yarn: true
+
+script:
+  - yarn run test
+  - yarn run test-integration
```
```diff
@@ -1,40 +1,56 @@
 'use strict'
 
-const PLUGINS_RE = /^(?:@babel\/plugin-.+|babel-plugin-lodash)$/
+const PLUGINS_RE = /^(?:@babel\/|babel-)plugin-.+$/
 const PRESETS_RE = /^@babel\/preset-.+$/
 
 const NODE_ENV = process.env.NODE_ENV || 'development'
 const __PROD__ = NODE_ENV === 'production'
 const __TEST__ = NODE_ENV === 'test'
 
+const configs = {
+  '@babel/plugin-proposal-decorators': {
+    legacy: true,
+  },
+  '@babel/preset-env' (pkg) {
+    return {
+      debug: !__TEST__,
+      loose: true,
+      shippedProposals: true,
+      targets: __PROD__
+        ? (() => {
+            let node = (pkg.engines || {}).node
+            if (node !== undefined) {
+              const trimChars = '^=>~'
+              while (trimChars.includes(node[0])) {
+                node = node.slice(1)
+              }
+              return { node: node }
+            }
+          })()
+        : { browsers: '', node: 'current' },
+      useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
+    }
+  },
+}
+
+const getConfig = (key, ...args) => {
+  const config = configs[key]
+  return config === undefined
+    ? {}
+    : typeof config === 'function'
+      ? config(...args)
+      : config
+}
+
 module.exports = function (pkg, plugins, presets) {
   plugins === undefined && (plugins = {})
 
   presets === undefined && (presets = {})
-  presets['@babel/preset-env'] = {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-          let node = (pkg.engines || {}).node
-          if (node !== undefined) {
-            const trimChars = '^=>~'
-            while (trimChars.includes(node[0])) {
-              node = node.slice(1)
-            }
-            return { node: node }
-          }
-        })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  }
 
   Object.keys(pkg.devDependencies || {}).forEach(name => {
     if (!(name in presets) && PLUGINS_RE.test(name)) {
-      plugins[name] = {}
+      plugins[name] = getConfig(name, pkg)
     } else if (!(name in presets) && PRESETS_RE.test(name)) {
-      presets[name] = {}
+      presets[name] = getConfig(name, pkg)
     }
   })
```
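This refactoring centralizes per-plugin and per-preset options in `configs` and resolves them through `getConfig`. A minimal sketch of how a package consumes the factory (the sample `pkg` object is a hypothetical illustration, and the hunk above does not show the function's return shape; real packages delegate via a relative path, as the `.babelrc.js` files added further down do):

```js
// Hypothetical consumer, mirroring the new .babelrc.js files:
//   module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
const makeBabelConfig = require('@xen-orchestra/babel-config')

const pkg = {
  engines: { node: '>=6' }, // leading '^=>~' chars are trimmed → targets { node: '6' } in production
  devDependencies: {
    '@babel/preset-env': '7.0.0-beta.49', // matches PRESETS_RE
    'babel-plugin-lodash': '^3.3.2', // matches the widened PLUGINS_RE
    '@babel/plugin-proposal-decorators': '7.0.0-beta.49', // picks up { legacy: true } from `configs`
  },
}

module.exports = makeBabelConfig(pkg)
```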
```diff
@@ -2,7 +2,7 @@
   "private": true,
   "name": "@xen-orchestra/babel-config",
   "version": "0.0.0",
-  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/babel-config",
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "repository": {
     "type": "git",
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/cron",
-  "version": "1.0.2",
+  "version": "1.0.3",
   "license": "ISC",
   "description": "Focused, well maintained, cron parser/scheduler",
   "keywords": [
@@ -14,7 +14,7 @@
     "scheduling",
     "task"
   ],
-  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "repository": {
     "type": "git",
@@ -41,10 +41,10 @@
     "moment-timezone": "^0.5.14"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
+    "@babel/preset-flow": "7.0.0-beta.49",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
   },
```
```diff
@@ -41,4 +41,8 @@ describe('next()', () => {
       'no solutions found for this schedule'
     )
   })
+
+  it('select the first sunday of the month', () => {
+    expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
+  })
 })
```
```diff
@@ -176,10 +176,10 @@ export default createParser({
       range: [0, 11],
     },
     {
-      aliases: 'mon tue wen thu fri sat sun'.split(' '),
+      aliases: 'sun mon tue wen thu fri sat'.split(' '),
       name: 'dayOfWeek',
-      post: value => (value === 0 ? 7 : value),
-      range: [1, 7],
+      post: value => (value === 7 ? 0 : value),
+      range: [0, 6],
     },
   ],
   presets: {
```
```diff
@@ -23,7 +23,7 @@ describe('parse()', () => {
 
   it('correctly parse days', () => {
     expect(parse('* * * * mon,sun')).toEqual({
-      dayOfWeek: [1, 7],
+      dayOfWeek: [0, 1],
     })
   })
 
@@ -40,10 +40,10 @@ describe('parse()', () => {
 
   it('dayOfWeek: 0 and 7 bind to sunday', () => {
     expect(parse('* * * * 0')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
    })
     expect(parse('* * * * 7')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
     })
   })
 })
```
`@xen-orchestra/fs/.babelrc.js` (new file)

```diff
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
`@xen-orchestra/fs/package.json` (new file)

```diff
@@ -0,0 +1,54 @@
+{
+  "name": "@xen-orchestra/fs",
+  "version": "0.0.1",
+  "license": "AGPL-3.0",
+  "description": "The File System for Xen Orchestra backups.",
+  "keywords": [],
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
+  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/vatesfr/xen-orchestra.git"
+  },
+  "preferGlobal": true,
+  "main": "dist/",
+  "bin": {},
+  "files": [
+    "dist/"
+  ],
+  "engines": {
+    "node": ">=6"
+  },
+  "dependencies": {
+    "@babel/runtime": "^7.0.0-beta.49",
+    "@marsaud/smb2-promise": "^0.2.1",
+    "execa": "^0.10.0",
+    "fs-extra": "^6.0.1",
+    "get-stream": "^3.0.0",
+    "lodash": "^4.17.4",
+    "promise-toolbox": "^0.9.5",
+    "through2": "^2.0.3",
+    "tmp": "^0.0.33",
+    "xo-remote-parser": "^0.3"
+  },
+  "devDependencies": {
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/plugin-proposal-function-bind": "7.0.0-beta.49",
+    "@babel/plugin-transform-runtime": "^7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
+    "@babel/preset-flow": "7.0.0-beta.49",
+    "babel-plugin-lodash": "^3.3.2",
+    "cross-env": "^5.1.3",
+    "index-modules": "^0.3.0",
+    "rimraf": "^2.6.2"
+  },
+  "scripts": {
+    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
+    "clean": "rimraf dist/",
+    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
+    "prebuild": "yarn run clean",
+    "predev": "yarn run clean",
+    "prepare": "yarn run build"
+  }
+}
```
```diff
@@ -1,11 +1,11 @@
 // @flow
 
+import getStream from 'get-stream'
+import { randomBytes } from 'crypto'
+import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
 import { type Readable, type Writable } from 'stream'
-import { fromEvent, ignoreErrors } from 'promise-toolbox'
 import { parse } from 'xo-remote-parser'
 
-import { getPseudoRandomBytes, streamToBuffer } from '../utils'
-
 import { createChecksumStream, validChecksumOfReadStream } from './checksum'
 
 type Data = Buffer | Readable | string
@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
 
   async test (): Promise<Object> {
     const testFileName = `${Date.now()}.test`
-    const data = getPseudoRandomBytes(1024 * 1024)
+    const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
     let step = 'write'
     try {
       await this.outputFile(testFileName, data)
@@ -97,7 +97,7 @@ export default class RemoteHandlerAbstract {
   }
 
   _readFile (file: string, options?: Object): Promise<Buffer> {
-    return this.createReadStream(file, options).then(streamToBuffer)
+    return this.createReadStream(file, options).then(getStream.buffer)
   }
 
   async rename (
@@ -126,7 +126,10 @@ export default class RemoteHandlerAbstract {
       prependDir = false,
     }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
   ): Promise<string[]> {
-    const entries = await this._list(dir)
+    let entries = await this._list(dir)
+    if (filter !== undefined) {
+      entries = entries.filter(filter)
+    }
 
     if (prependDir) {
       entries.forEach((entry, i) => {
@@ -134,7 +137,7 @@ export default class RemoteHandlerAbstract {
       })
     }
 
-    return filter === undefined ? entries : entries.filter(filter)
+    return entries
   }
 
   async _list (dir: string): Promise<string[]> {
```
```diff
@@ -7,6 +7,9 @@ import { defer, fromEvent } from 'promise-toolbox'
 import { invert } from 'lodash'
 import { type Readable, type Transform } from 'stream'
 
+// Format: $<algorithm>$<salt>$<encrypted>
+//
+// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
 const ALGORITHM_TO_ID = {
   md5: '1',
   sha256: '5',
```
`@xen-orchestra/fs/src/fs.integ.spec.js` (new file)

```diff
@@ -0,0 +1,26 @@
+/* eslint-env jest */
+
+import rimraf from 'rimraf'
+import tmp from 'tmp'
+
+import { fromCallback as pFromCallback } from 'promise-toolbox'
+import { getHandler } from '.'
+
+const initialDir = process.cwd()
+
+beforeEach(async () => {
+  const dir = await pFromCallback(cb => tmp.dir(cb))
+  process.chdir(dir)
+})
+
+afterEach(async () => {
+  const tmpDir = process.cwd()
+  process.chdir(initialDir)
+  await pFromCallback(cb => rimraf(tmpDir, cb))
+})
+
+test("fs test doesn't crash", async () => {
+  const handler = getHandler({ url: 'file://' + process.cwd() })
+  const result = await handler.test()
+  expect(result.success).toBeTruthy()
+})
```
`@xen-orchestra/fs/src/index.js` (new file)

```diff
@@ -0,0 +1,26 @@
+// @flow
+
+import type RemoteHandler from './abstract'
+import RemoteHandlerLocal from './local'
+import RemoteHandlerNfs from './nfs'
+import RemoteHandlerSmb from './smb'
+
+export type { default as RemoteHandler } from './abstract'
+export type Remote = { url: string }
+
+const HANDLERS = {
+  file: RemoteHandlerLocal,
+  smb: RemoteHandlerSmb,
+  nfs: RemoteHandlerNfs,
+}
+
+export const getHandler = (remote: Remote): RemoteHandler => {
+  // FIXME: should be done in xo-remote-parser.
+  const type = remote.url.split('://')[0]
+
+  const Handler = HANDLERS[type]
+  if (!Handler) {
+    throw new Error('Unhandled remote type')
+  }
+  return new Handler(remote)
+}
```
```diff
@@ -1,7 +1,9 @@
 import Smb2 from '@marsaud/smb2-promise'
+import { lastly as pFinally } from 'promise-toolbox'
 
 import RemoteHandlerAbstract from './abstract'
-import { noop, pFinally } from '../utils'
+
+const noop = () => {}
 
 // Normalize the error code for file not found.
 const normalizeError = error => {
```
`CHANGELOG.md` (102 changes)

```diff
@@ -1,9 +1,104 @@
 # ChangeLog
 
+## **5.20.0** (planned 2018-05-31)
+
+### Enhancements
+
+- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
+- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
+- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
+- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
+
+### Bugs
+
+## **5.19.0** (2018-05-01)
+
+### Enhancements
+
+- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
+- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
+- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
+- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
+- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
+- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
+- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
+- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
+- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
+- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
+- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
+- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
+- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
+- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
+- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
+- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
+- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
+- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
+- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
+
+### Bugs
+
+- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
+- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
+- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
+- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
+- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
+- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
+- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
+- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
+- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
+- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
+- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
+
+## **5.18.0** (2018-03-31)
+
+### Enhancements
+
+- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
+- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
+- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
+- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
+- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
+- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
+- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
+- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
+- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
+- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
+- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
+- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
+- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
+- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
+- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
+- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
+- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
+- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
+- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
+- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
+
+
+### Bugs
+
+- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
+- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
+- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
+- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
+- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
+- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
+- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
+- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
+- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
+- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
+- [Basic backup] Continous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
+- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
+- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
+- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
+- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
+- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
+
 ## **5.17.0** (2018-03-02)
 
 ### Enhancements
 
 - Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
 - Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
 - Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
 - Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
@@ -22,6 +117,9 @@
 - Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
 - Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
 - [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
+- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
+- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
+- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
 
 ### Bugs
 
@@ -41,6 +139,7 @@
 - Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
 - Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
 - Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
+- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
 - Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
 - Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
 - Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
@@ -50,6 +149,8 @@
 - Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
+- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
+- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
 - home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
 - Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
 
 ### Bugs
 
@@ -79,6 +180,7 @@
 - Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
 - [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
+- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
 - Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
 
 ### Bugs
 
```
`PULL_REQUEST_TEMPLATE.md` (new file)

```diff
@@ -0,0 +1,16 @@
+### Check list
+
+- [ ] if UI changes, a screenshot has been added to the PR
+- [ ] CHANGELOG updated
+- [ ] documentation updated
+
+### Process
+
+1. create a PR as soon as possible
+1. mark it as `WiP:` (Work in Progress) if not ready to be merged
+1. when you want a review, add a reviewer
+1. if necessary, update your PR, and readd a reviewer
+
+### List of packages to release
+
+> No need to mention xo-server and xo-web.
```
`babel.config.js` (new file)

```diff
@@ -0,0 +1,5 @@
+module.exports = {
+  // Necessary for jest to be able to find the `.babelrc.js` closest to the file
+  // instead of only the one in this directory.
+  babelrcRoots: true,
+}
```
`flow-typed/limit-concurrency-decorator.js` (new file)

```diff
@@ -0,0 +1,6 @@
+declare module 'limit-concurrency-decorator' {
+  declare function limitConcurrencyDecorator(
+    concurrency: number
+  ): <T: Function>(T) => T
+  declare export default typeof limitConcurrencyDecorator
+}
```
`flow-typed/lodash.js` (13 changes)

```diff
@@ -1,4 +1,12 @@
 declare module 'lodash' {
+  declare export function forEach<K, V>(
+    object: { [K]: V },
+    iteratee: (V, K) => void
+  ): void
+  declare export function groupBy<K, V>(
+    object: { [K]: V },
+    iteratee: K | ((V, K) => string)
+  ): { [string]: V[] }
   declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
   declare export function isEmpty(mixed): boolean
   declare export function keyBy<T>(array: T[], iteratee: string): boolean
@@ -12,5 +20,10 @@ declare module 'lodash' {
     iteratee: (V1, K) => V2
   ): { [K]: V2 }
   declare export function noop(...args: mixed[]): void
+  declare export function some<T>(
+    collection: T[],
+    iteratee: (T, number) => boolean
+  ): boolean
+  declare export function sum(values: number[]): number
   declare export function values<K, V>(object: { [K]: V }): V[]
 }
```
`flow-typed/promise-toolbox.js` (5 changes)

```diff
@@ -3,8 +3,11 @@ declare module 'promise-toolbox' {
   declare export function defer<T>(): {|
     promise: Promise<T>,
     reject: T => void,
-    resolve: T => void
+    resolve: T => void,
   |}
+  declare export function fromCallback<T>(
+    (cb: (error: any, value: T) => void) => void
+  ): Promise<T>
   declare export function fromEvent(emitter: mixed, string): Promise<mixed>
   declare export function ignoreErrors(): Promise<void>
   declare export function timeout<T>(delay: number): Promise<T>
```
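For context, `fromCallback` wraps a single Node-style callback call site in a promise; the declaration added above is what lets Flow-typed code such as `abstract.js` use it. A minimal sketch of the pattern:

```js
import { randomBytes } from 'crypto'
import { fromCallback } from 'promise-toolbox'

// Resolves with the value passed to `cb`, rejects if `cb` receives an error.
const randomMiB = () => fromCallback(cb => randomBytes(1024 * 1024, cb))
```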
`package.json` (26 changes)

```diff
@@ -1,8 +1,10 @@
 {
   "devDependencies": {
-    "@babel/register": "^7.0.0-beta.40",
-    "babel-7-jest": "^21.3.2",
+    "@babel/core": "^7.0.0-beta.49",
+    "@babel/register": "^7.0.0-beta.49",
+    "babel-core": "^7.0.0-0",
     "babel-eslint": "^8.1.2",
+    "babel-jest": "^23.0.1",
     "benchmark": "^2.1.4",
     "eslint": "^4.14.0",
     "eslint-config-standard": "^11.0.0-beta.0",
@@ -13,37 +15,30 @@
     "eslint-plugin-react": "^7.6.1",
     "eslint-plugin-standard": "^3.0.1",
     "exec-promise": "^0.7.0",
-    "flow-bin": "^0.67.1",
+    "flow-bin": "^0.73.0",
     "globby": "^8.0.0",
     "husky": "^0.14.3",
-    "jest": "^22.0.4",
+    "jest": "^23.0.1",
     "lodash": "^4.17.4",
     "prettier": "^1.10.2",
     "promise-toolbox": "^0.9.5",
     "sorted-object": "^2.0.1"
   },
   "engines": {
-    "yarn": "^1.2.1"
+    "yarn": "^1.7.0"
   },
   "jest": {
     "collectCoverage": true,
     "projects": [
-      "<rootDir>",
-      "<rootDir>/packages/xo-web"
+      "<rootDir>"
     ],
     "testEnvironment": "node",
     "testPathIgnorePatterns": [
       "/dist/",
       "/xo-vmdk-to-vhd/",
       "/xo-web/"
     ],
     "testRegex": "\\.spec\\.js$",
     "transform": {
-      "/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
-      "/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
-      "/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
-      "/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
-      "/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
       "\\.jsx?$": "babel-jest"
     }
   },
@@ -52,12 +47,13 @@
     "build": "scripts/run-script --parallel build",
     "clean": "scripts/run-script --parallel clean",
     "dev": "scripts/run-script --parallel dev",
-    "dev-test": "jest --bail --watch",
+    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
     "posttest": "scripts/run-script test",
     "precommit": "scripts/lint-staged",
     "prepare": "scripts/run-script prepare",
     "pretest": "eslint --ignore-path .gitignore .",
-    "test": "jest"
+    "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
+    "test-integration": "jest \".integ\\.spec\\.js$\""
   },
   "workspaces": [
     "@xen-orchestra/*",
```
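The unit/integration split relies on Jest treating the positional argument as a regular expression tested against each test file path. A quick sanity check of the two patterns (file names here are illustrative):

```js
const unit = /^(?!.*\.integ\.spec\.js$)/ // used by `test` and `dev-test`
const integ = /\.integ\.spec\.js$/ // used by `test-integration`

console.log(unit.test('src/parse.spec.js')) // true  → runs in `yarn test`
console.log(unit.test('src/fs.integ.spec.js')) // false → skipped by `yarn test`
console.log(integ.test('src/fs.integ.spec.js')) // true  → runs in `yarn test-integration`
```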
```diff
@@ -30,9 +30,9 @@
     "lodash": "^4.17.4"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.1",
     "rimraf": "^2.6.2"
```
```diff
@@ -28,10 +28,10 @@
   },
   "dependencies": {},
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
+    "@babel/preset-flow": "7.0.0-beta.49",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
   },
```
`packages/vhd-cli/.babelrc.js` (new file)

```diff
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
`packages/vhd-cli/package.json`

```diff
@@ -1,6 +1,6 @@
 {
   "name": "vhd-cli",
-  "version": "0.0.0",
+  "version": "0.0.1",
   "license": "ISC",
   "description": "",
   "keywords": [],
@@ -23,45 +23,32 @@
     "dist/"
   ],
   "engines": {
-    "node": ">=4"
+    "node": ">=6"
   },
   "dependencies": {
-    "@nraynaud/struct-fu": "^1.0.1",
-    "@nraynaud/xo-fs": "^0.0.5",
-    "babel-runtime": "^6.22.0",
-    "exec-promise": "^0.7.0"
+    "@xen-orchestra/fs": "^0.0.1",
+    "exec-promise": "^0.7.0",
+    "struct-fu": "^1.2.0",
+    "vhd-lib": "^0.1.1"
   },
   "devDependencies": {
-    "babel-cli": "^6.24.1",
+    "@babel/cli": "^7.0.0-beta.49",
+    "@babel/core": "^7.0.0-beta.49",
+    "@babel/plugin-transform-runtime": "^7.0.0-beta.49",
+    "@babel/preset-env": "^7.0.0-beta.49",
     "babel-plugin-lodash": "^3.3.2",
-    "babel-plugin-transform-runtime": "^6.23.0",
-    "babel-preset-env": "^1.5.2",
-    "babel-preset-stage-3": "^6.24.1",
     "cross-env": "^5.1.3",
-    "rimraf": "^2.6.1"
+    "execa": "^0.10.0",
+    "index-modules": "^0.3.0",
+    "promise-toolbox": "^0.9.5",
+    "rimraf": "^2.6.1",
+    "tmp": "^0.0.33"
   },
   "scripts": {
     "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
-    "prebuild": "rimraf dist/",
+    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
-  },
-  "babel": {
-    "plugins": [
-      "lodash",
-      "transform-runtime"
-    ],
-    "presets": [
-      [
-        "env",
-        {
-          "targets": {
-            "node": 4
-          }
-        }
-      ],
-      "stage-3"
-    ]
-  }
+    "prepare": "yarn run build"
+  }
 }
```
`packages/vhd-cli/src/commands/check.js` (new file)

```diff
@@ -0,0 +1,15 @@
+import Vhd from 'vhd-lib'
+import { getHandler } from '@xen-orchestra/fs'
+import { resolve } from 'path'
+
+export default async args => {
+  const handler = getHandler({ url: 'file:///' })
+  for (const vhd of args) {
+    try {
+      await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
+      console.log('ok:', vhd)
+    } catch (error) {
+      console.error('nok:', vhd, error)
+    }
+  }
+}
```
`packages/vhd-cli/src/commands/info.js` (new file)

```diff
@@ -0,0 +1,12 @@
+import Vhd from 'vhd-lib'
+import { getHandler } from '@xen-orchestra/fs'
+import { resolve } from 'path'
+
+export default async args => {
+  const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))
+
+  await vhd.readHeaderAndFooter()
+
+  console.log(vhd.header)
+  console.log(vhd.footer)
+}
```
`packages/vhd-cli/src/commands/synthetize.js` (new file)

```diff
@@ -0,0 +1,21 @@
+import path from 'path'
+import { createSyntheticStream } from 'vhd-lib'
+import { createWriteStream } from 'fs'
+import { getHandler } from '@xen-orchestra/fs'
+
+export default async function main (args) {
+  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
+    return `Usage: ${this.command} <input VHD> <output VHD>`
+  }
+
+  const handler = getHandler({ url: 'file:///' })
+  return new Promise((resolve, reject) => {
+    createSyntheticStream(handler, path.resolve(args[0]))
+      .on('error', reject)
+      .pipe(
+        createWriteStream(args[1])
+          .on('error', reject)
+          .on('finish', resolve)
+      )
+  })
+}
```
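The promise wrapper in `synthetize.js` is a general pattern worth noting: reject on an error from either end of the pipe, resolve only on the writer's `finish` (i.e. when the destination is fully flushed). A sketch of the same pattern in isolation, with plain `fs` streams:

```js
const { createReadStream, createWriteStream } = require('fs')

// Copy src → dst, settling only when the destination is fully written.
const copy = (src, dst) =>
  new Promise((resolve, reject) => {
    createReadStream(src)
      .on('error', reject) // read-side failure
      .pipe(
        createWriteStream(dst)
          .on('error', reject) // write-side failure
          .on('finish', resolve)
      )
  })
```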
```diff
@@ -1,19 +1,44 @@
 #!/usr/bin/env node
 
 import execPromise from 'exec-promise'
-import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
-import { resolve } from 'path'
 
-import Vhd from './vhd'
+import commands from './commands'
 
-execPromise(async args => {
-  const vhd = new Vhd(
-    new RemoteHandlerLocal({ url: 'file:///' }),
-    resolve(args[0])
-  )
+function runCommand (commands, [command, ...args]) {
+  if (command === undefined || command === '-h' || command === '--help') {
+    command = 'help'
+  }
+
+  const fn = commands[command]
+
+  if (fn === undefined) {
+    if (command === 'help') {
+      return `Usage:
+
+${Object.keys(commands)
+    .filter(command => command !== 'help')
+    .map(command => `  ${this.command} ${command}`)
+    .join('\n\n')}`
+    }
+
+    throw `invalid command ${command}` // eslint-disable-line no-throw-literal
+  }
+
+  return fn.call(
+    {
+      __proto__: this,
+      command: `${this.command} ${command}`,
+    },
+    args
+  )
+}
 
-  await vhd.readHeaderAndFooter()
-
-  console.log(vhd._header)
-  console.log(vhd._footer)
-})
+execPromise(
+  runCommand.bind(
+    {
+      command: 'vhd-cli',
+      runCommand,
+    },
+    commands
+  )
+)
```
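The rewritten entry point turns vhd-cli into a git-style multi-command tool: `commands` is the lazily generated map from `src/commands/` (see the `index-modules --cjs-lazy` prebuild step above) and `runCommand` dispatches on the first CLI argument. A minimal, hypothetical reproduction of the dispatch, not the real generated map:

```js
const commands = {
  async info (args) {
    console.log(this.command, args)
  },
}

function runCommand (commands, [command, ...args]) {
  const fn = commands[command]
  // Each sub-command inherits the caller's context and gets its full name,
  // which is how synthetize.js can print `Usage: ${this.command} ...`.
  return fn.call({ __proto__: this, command: `${this.command} ${command}` }, args)
}

// `vhd-cli info empty.vhd` then amounts to:
runCommand.call({ command: 'vhd-cli' }, commands, ['info', 'empty.vhd'])
// → logs: vhd-cli info [ 'empty.vhd' ]
```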
`packages/vhd-cli/src/info.integ.spec.js` (new file)

```diff
@@ -0,0 +1,28 @@
+/* eslint-env jest */
+
+import execa from 'execa'
+import rimraf from 'rimraf'
+import tmp from 'tmp'
+import { fromCallback as pFromCallback } from 'promise-toolbox'
+
+import command from './commands/info'
+
+const initialDir = process.cwd()
+
+jest.setTimeout(10000)
+
+beforeEach(async () => {
+  const dir = await pFromCallback(cb => tmp.dir(cb))
+  process.chdir(dir)
+})
+
+afterEach(async () => {
+  const tmpDir = process.cwd()
+  process.chdir(initialDir)
+  await pFromCallback(cb => rimraf(tmpDir, cb))
+})
+
+test('can run the command', async () => {
+  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
+  await command(['empty.vhd'])
+})
```
```diff
@@ -1,461 +0,0 @@
```

All of the following is removed (the old in-tree VHD implementation, superseded by `vhd-lib`):

```js
import assert from 'assert'
import fu from '@nraynaud/struct-fu'
import { dirname } from 'path'

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

/* eslint-disable no-unused-vars */

const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512

/* eslint-enable no-unused vars */

// ===================================================================

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  fu.struct('dataOffset', [
    fu.uint32('high'), // 16
    fu.uint32('low'), // 20
  ]),
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  fu.struct('originalSize', [
    // At the creation, current size of the hard disk.
    fu.uint32('high'), // 40
    fu.uint32('low'), // 44
  ]),
  fu.struct('currentSize', [
    // Current size of the virtual disk. At the creation: currentSize = originalSize.
    fu.uint32('high'), // 48
    fu.uint32('low'), // 52
  ]),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85
  fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
  fu.struct('tableOffset', [
    // Absolute byte offset of the Block Allocation Table.
    fu.uint32('high'),
    fu.uint32('low'),
  ]),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.byte('reserved1', 4),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      fu.struct('platformDataOffset', [
        // Absolute byte offset of the locator data.
        fu.uint32('high'),
        fu.uint32('low'),
      ]),
    ],
    8
  ),
  fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size

// ===================================================================
// Helpers
// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)

// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
  map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
  map[bit >> 3] &= ~(1 << (bit & 7))
}

const addOffsets = (...offsets) =>
  offsets.reduce(
    (a, b) =>
      b == null
        ? a
        : typeof b === 'object'
          ? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
          : { bytes: a.bytes + b, bits: a.bits },
    { bytes: 0, bits: 0 }
  )

const pack = (field, value, buf, offset) => {
  field.pack(value, buf, addOffsets(field.offset, offset))
}

const unpack = (field, buf, offset) =>
  field.unpack(buf, addOffsets(field.offset, offset))

// ===================================================================

const streamToNewBuffer = stream =>
  new Promise((resolve, reject) => {
    const chunks = []
    let length = 0

    const onData = chunk => {
      chunks.push(chunk)
      length += chunk.length
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(Buffer.concat(chunks, length))
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

const streamToExistingBuffer = (
  stream,
  buffer,
  offset = 0,
  end = buffer.length
) =>
  new Promise((resolve, reject) => {
    assert(offset >= 0)
    assert(end > offset)
    assert(end <= buffer.length)

    let i = offset

    const onData = chunk => {
      const prev = i
      i += chunk.length

      if (i > end) {
        return onError(new Error('too much data'))
      }

      chunk.copy(buffer, prev)
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(i - offset)
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

// ===================================================================

// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumField = struct.fields.checksum
  const checksumOffset = offset + checksumField.offset
  for (let i = offset, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = offset + struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  return ~sum >>> 0
}

const verifyChecksum = (struct, buf, offset) =>
  unpack(struct.fields.checksum, buf, offset) ===
  computeChecksum(struct, buf, offset)

const getParentLocatorSize = parentLocatorEntry => {
  const { platformDataSpace } = parentLocatorEntry

  if (platformDataSpace < SECTOR_SIZE) {
    return platformDataSpace * SECTOR_SIZE
  }

  return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}

// ===================================================================

// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]

export default class Vhd {
  constructor (handler, path) {
    this._handler = handler
    this._path = path

    this._blockAllocationTable = null
    this._blockBitmapSize = null
    this._footer = null
    this._header = null
    this._parent = null
    this._sectorsPerBlock = null
  }

  // Read `length` bytes starting from `begin`.
  //
  // - if `buffer`: it is filled starting from `offset`, and the
  //   number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _read (begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    return this._handler
      .createReadStream(this._path, {
        end: begin + length - 1,
        start: begin,
      })
      .then(
        buf
          ? stream =>
              streamToExistingBuffer(
                stream,
                buf,
                offset,
                (offset || 0) + length
              )
          : streamToNewBuffer
      )
  }

  // - if `buffer`: it is filled with 0 starting from `offset`, and
  //   the number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _zeroes (length, buf, offset = 0) {
    if (buf) {
      assert(offset >= 0)
      assert(length > 0)

      const end = offset + length
      assert(end <= buf.length)

      buf.fill(0, offset, end)
      return Promise.resolve(length)
    }

    return Promise.resolve(Buffer.alloc(length))
  }

  // Return the position of a block in the VHD or undefined if not found.
  _getBlockAddress (block) {
    assert(block >= 0)
    assert(block < this._header.maxTableEntries)

    const blockAddr = this._blockAllocationTable[block]
    if (blockAddr !== 0xffffffff) {
      return blockAddr * SECTOR_SIZE
    }
  }

  // -----------------------------------------------------------------

  async readHeaderAndFooter () {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)

    if (!verifyChecksum(fuFooter, buf)) {
      throw new Error('footer checksum does not match')
    }

    if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
      throw new Error('header checksum does not match')
    }

    return this._initMetadata(
      unpack(fuHeader, buf, FOOTER_SIZE),
      unpack(fuFooter, buf)
    )
  }

  async _initMetadata (header, footer) {
    const sectorsPerBlock = header.blockSize / SECTOR_SIZE
    assert(sectorsPerBlock % 1 === 0)

    // 1 bit per sector, rounded up to full sectors
    this._blockBitmapSize =
      Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
    assert(this._blockBitmapSize === SECTOR_SIZE)

    this._footer = footer
    this._header = header
    this.size = uint32ToUint64(this._footer.currentSize)

    if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
      const parent = new Vhd(
        this._handler,
        `${dirname(this._path)}/${header.parentUnicodeName}`
      )
      await parent.readHeaderAndFooter()
      await parent.readBlockAllocationTable()

      this._parent = parent
    }
  }

  // -----------------------------------------------------------------

  async readBlockAllocationTable () {
    const { maxTableEntries, tableOffset } = this._header
    const fuTable = fu.uint32(maxTableEntries)

    this._blockAllocationTable = unpack(
      fuTable,
      await this._read(uint32ToUint64(tableOffset), fuTable.size)
    )
  }

  // -----------------------------------------------------------------

  // read a single sector in a block
  async _readBlockSector (block, sector, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)
    assert(begin + length <= SECTOR_SIZE)

    const blockAddr = this._getBlockAddress(block)
    const blockBitmapSize = this._blockBitmapSize
    const parent = this._parent

    if (
      blockAddr &&
      (!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
    ) {
      return this._read(
        blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
        length,
        buf,
        offset
      )
    }

    return parent
      ? parent._readBlockSector(block, sector, begin, length, buf, offset)
      : this._zeroes(length, buf, offset)
  }

  _readBlock (block, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    const { blockSize } = this._header
    assert(begin + length <= blockSize)

    const blockAddr = this._getBlockAddress(block)
    const parent = this._parent

    if (!blockAddr) {
      return parent
        ? parent._readBlock(block, begin, length, buf, offset)
        : this._zeroes(length, buf, offset)
    }

    if (!parent) {
      return this._read(
        blockAddr + this._blockBitmapSize + begin,
        length,
        buf,
        offset
      )
    }

    // FIXME: we should read as many sectors in a single pass as
    // possible for maximum perf.
    const [sector, beginInSector] = div(begin, SECTOR_SIZE)
    return this._readBlockSector(
      block,
      sector,
      beginInSector,
      Math.min(length, SECTOR_SIZE - beginInSector),
      buf,
      offset
    )
  }

  read (buf, begin, length = buf.length, offset) {
    assert(Buffer.isBuffer(buf))
    assert(begin >= 0)

    const { size } = this
    if (begin >= size) {
      return Promise.resolve(0)
    }

    const { blockSize } = this._header
    const [block, beginInBlock] = div(begin, blockSize)

    return this._readBlock(
      block,
      beginInBlock,
      Math.min(length, blockSize - beginInBlock, size - begin),
      buf,
      offset
    )
  }
}
```
`packages/vhd-lib/.babelrc.js` (new file)

```diff
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```

packages/vhd-lib/package.json (new file, 56 lines)
@@ -0,0 +1,56 @@
{
  "name": "vhd-lib",
  "version": "0.1.1",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "@babel/runtime": "^7.0.0-beta.49",
    "async-iterator-to-stream": "^1.0.2",
    "from2": "^2.3.0",
    "fs-extra": "^6.0.1",
    "get-stream": "^3.0.0",
    "limit-concurrency-decorator": "^0.4.0",
    "promise-toolbox": "^0.9.5",
    "struct-fu": "^1.2.0",
    "uuid": "^3.0.1"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.49",
    "@babel/core": "7.0.0-beta.49",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.49",
    "@babel/preset-env": "7.0.0-beta.49",
    "@babel/preset-flow": "7.0.0-beta.49",
    "@xen-orchestra/fs": "^0.0.1",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "execa": "^0.10.0",
    "fs-promise": "^2.0.0",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2",
    "tmp": "^0.0.33"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}

packages/vhd-lib/src/_bitmap.js (new file, 7 lines)
@@ -0,0 +1,7 @@
const MASK = 0x80

export const set = (map, bit) => {
  map[bit >> 3] |= MASK >> (bit & 7)
}

export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
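
A minimal usage sketch of these helpers (the buffer here is a hypothetical example, not part of the diff): each bit addresses one sector, most-significant bit first within each byte.

import { set, test } from './_bitmap'

// One 512-byte bitmap covers 4096 sectors (512 * 8 bits).
const bitmap = Buffer.alloc(512)

set(bitmap, 0) // marks sector 0: bitmap[0] === 0b10000000
set(bitmap, 9) // marks sector 9: bitmap[1] === 0b01000000

console.log(test(bitmap, 0)) // → true
console.log(test(bitmap, 1)) // → false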

packages/vhd-lib/src/_computeGeometryForSize.js (new file, 37 lines)
@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'

export default function computeGeometryForSize (size) {
  const totalSectors = Math.ceil(size / 512)
  let sectorsPerTrackCylinder
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrackCylinder = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
  } else {
    sectorsPerTrackCylinder = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrackCylinder = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrackCylinder = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
  }
  const cylinders = Math.ceil(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
  return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
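
A worked example of the CHS calculation above, for a hypothetical 2 MiB disk (4096 sectors, which lands in the small-disk branch: 17 sectors per track, minimum of 4 heads):

import computeGeometryForSize from './_computeGeometryForSize'

const { cylinders, heads, sectorsPerTrackCylinder, actualSize } =
  computeGeometryForSize(2 * 1024 * 1024)

console.log(cylinders, heads, sectorsPerTrackCylinder) // → 61 4 17
console.log(actualSize) // → 2123776: the size rounded up to whole cylinders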

packages/vhd-lib/src/_constants.js (new file, 30 lines)
@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff

// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '

// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec

export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'

export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4

export const PARENT_LOCATOR_ENTRIES = 8

export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16

packages/vhd-lib/src/_createFooterHeader.js (new file, 56 lines)
@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'

import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
  CREATOR_APPLICATION,
  DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
  DISK_TYPE_FIXED,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PLATFORM_WI2K,
} from './_constants'

export function createFooter (
  size,
  timestamp,
  geometry,
  dataOffset,
  diskType = DISK_TYPE_FIXED
) {
  const footer = fuFooter.pack({
    cookie: FOOTER_COOKIE,
    features: 2,
    fileFormatVersion: FILE_FORMAT_VERSION,
    dataOffset,
    timestamp,
    creatorApplication: CREATOR_APPLICATION,
    creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
    originalSize: size,
    currentSize: size,
    diskGeometry: geometry,
    diskType,
    uuid: generateUuid(null, []),
  })
  checksumStruct(footer, fuFooter)
  return footer
}

export function createHeader (
  maxTableEntries,
  tableOffset = HEADER_SIZE + FOOTER_SIZE,
  blockSize = VHD_BLOCK_SIZE_BYTES
) {
  const header = fuHeader.pack({
    cookie: HEADER_COOKIE,
    tableOffset,
    headerVersion: HEADER_VERSION,
    maxTableEntries,
    blockSize,
  })
  checksumStruct(header, fuHeader)
  return header
}
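
A sketch of how the two builders fit together for a dynamic disk; the 2 MiB size and the derived block count are illustrative assumptions, not values from this changeset:

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
  DEFAULT_BLOCK_SIZE,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
} from './_constants'

const size = 2 * 1024 * 1024
const geometry = computeGeometryForSize(size)

// A dynamic disk's footer points at the header (offset 512)...
const footer = createFooter(
  geometry.actualSize,
  Math.floor(Date.now() / 1000),
  geometry,
  FOOTER_SIZE,
  DISK_TYPE_DYNAMIC
)
// ...and the header is sized for one BAT entry per 2 MiB block.
const header = createHeader(Math.ceil(size / DEFAULT_BLOCK_SIZE))

console.log(footer.length, header.length) // → 512 1024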

packages/vhd-lib/src/_structs.js (new file, 121 lines)
@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'

import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'

const SIZE_OF_32_BITS = Math.pow(2, 32)

const uint64 = fu.derive(
  fu.uint32(2),
  number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ => _[0] * SIZE_OF_32_BITS + _[1]
)
const uint64Undefinable = fu.derive(
  fu.uint32(2),
  number =>
    number === undefined
      ? [0xffffffff, 0xffffffff]
      : [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ =>
    _[0] === 0xffffffff && _[1] === 0xffffffff
      ? undefined
      : _[0] * SIZE_OF_32_BITS + _[1]
)

export const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  uint64Undefinable('dataOffset'), // offset of the header
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  uint64('originalSize'),
  uint64('currentSize'),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85 TODO: should probably be merged in reserved
  fu.char('reserved', 426), // 86
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)

export const fuHeader = fu.struct([
  fu.char('cookie', 8),
  uint64Undefinable('dataOffset'),
  uint64('tableOffset'),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.uint32('reserved1'),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
    ],
    PARENT_LOCATOR_ENTRIES
  ),
  fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)

export const packField = (field, value, buf) => {
  const { offset } = field

  field.pack(
    value,
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

export const unpackField = (field, buf) => {
  const { offset } = field

  return field.unpack(
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
  const checksumField = struct.fields.checksum
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumOffset = checksumField.offset
  for (let i = 0, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  sum = ~sum >>> 0

  // Write new sum.
  packField(checksumField, sum, buf)

  return sum
}
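
A short sketch of the checksum round trip, assuming an all-zero footer-sized buffer: checksumStruct both returns the one's-complement byte sum and writes it back into the buffer, so reading the field afterwards must agree.

import { fuFooter, checksumStruct, unpackField } from './_structs'

const buf = Buffer.alloc(fuFooter.size)
const sum = checksumStruct(buf, fuFooter) // 0xffffffff for all-zero input

console.log(sum === unpackField(fuFooter.fields.checksum, buf)) // → true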

packages/vhd-lib/src/chain.js (new file, 37 lines)
@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'

export default async function chain (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockAllocationTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  const parentName = relative(dirname(childPath), parentPath)
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName
  await childVhd.setUniqueParentLocator(parentName)
  await childVhd.writeHeader()
  await childVhd.writeFooter()
}
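
A hedged usage sketch of chain(); the handler URL and VHD file names below are assumptions, not part of this changeset. It rewrites child.vhd so that parent.vhd becomes its parent, forcing the child to the differencing disk type because force is true.

import { getHandler } from '@xen-orchestra/fs'
import chain from './chain'

const handler = getHandler({ url: 'file:///tmp/vhds' })
chain(handler, 'parent.vhd', handler, 'child.vhd', true).catch(console.error)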

packages/vhd-lib/src/createReadableRawStream.js (new file, 42 lines)
@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'

export default asyncIteratorToStream(async function * (size, blockParser) {
  const geometry = computeGeometryForSize(size)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry
  )
  let position = 0

  function * filePadding (paddingLength) {
    if (paddingLength > 0) {
      const chunkSize = 1024 * 1024 // 1 MiB
      for (
        let paddingPosition = 0;
        paddingPosition + chunkSize < paddingLength;
        paddingPosition += chunkSize
      ) {
        yield Buffer.alloc(chunkSize)
      }
      yield Buffer.alloc(paddingLength % chunkSize)
    }
  }

  let next
  while ((next = await blockParser.next()) !== null) {
    const paddingLength = next.offsetBytes - position
    if (paddingLength < 0) {
      throw new Error('Received out of order blocks')
    }
    yield * filePadding(paddingLength)
    yield next.data
    position = next.offsetBytes + next.data.length
  }
  yield * filePadding(actualSize - position)
  yield footer
})
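
A minimal sketch of driving this stream with a hypothetical parser object: next() must return { offsetBytes, data } records in ascending offset order, then null; gaps between records are zero-filled by the stream.

import { createWriteStream } from 'fs'
import createReadableRawStream from './createReadableRawStream'

const blocks = [{ offsetBytes: 0, data: Buffer.alloc(512, 1) }]
let i = 0
const parser = { next: () => (i < blocks.length ? blocks[i++] : null) }

createReadableRawStream(1024, parser).pipe(createWriteStream('disk.vhd'))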

packages/vhd-lib/src/createReadableSparseStream.js (new file, 126 lines)
@@ -0,0 +1,126 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
  BLOCK_UNUSED,
  DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
  HEADER_SIZE,
  SECTOR_SIZE,
} from './_constants'

import { set as setBitmap } from './_bitmap'

const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE

/**
 * Fills `bat` in place: each used VHD block gets the sector address it
 * will occupy in the output file; unused entries stay BLOCK_UNUSED.
 */
function createBAT (
  firstBlockPosition,
  blockAddressList,
  ratio,
  bat,
  bitmapSize
) {
  let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
  blockAddressList.forEach(blockPosition => {
    assert.strictEqual(blockPosition % 512, 0)
    const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
    if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
      bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
      currentVhdPositionSector +=
        (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
    }
  })
}

export default asyncIteratorToStream(async function * (
  diskSize,
  incomingBlockSize,
  blockAddressList,
  blockIterator
) {
  const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
  if (ratio % 1 !== 0) {
    throw new Error(
      `Can't import file, grain size (${incomingBlockSize}) is not a divisor of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
    )
  }
  if (ratio > 53) {
    throw new Error(
      `Can't import file, grain size / block size ratio is > 53 (${ratio})`
    )
  }

  const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
  const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512

  const batPosition = FOOTER_SIZE + HEADER_SIZE
  const firstBlockPosition = batPosition + tablePhysicalSizeBytes
  const geometry = computeGeometryForSize(diskSize)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry,
    FOOTER_SIZE,
    DISK_TYPE_DYNAMIC
  )
  const header = createHeader(
    maxTableEntries,
    batPosition,
    VHD_BLOCK_SIZE_BYTES
  )
  const bitmapSize =
    Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
  const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
  createBAT(firstBlockPosition, blockAddressList, ratio, bat, bitmapSize)
  let position = 0
  function * yieldAndTrack (buffer, expectedPosition) {
    if (expectedPosition !== undefined) {
      assert.strictEqual(position, expectedPosition)
    }
    if (buffer.length > 0) {
      yield buffer
      position += buffer.length
    }
  }
  async function * generateFileContent (blockIterator, bitmapSize, ratio) {
    let currentBlock = -1
    let currentVhdBlockIndex = -1
    let currentBlockWithBitmap = Buffer.alloc(0)
    for await (const next of blockIterator) {
      currentBlock++
      assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
      const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
      if (batIndex !== currentVhdBlockIndex) {
        if (currentVhdBlockIndex >= 0) {
          yield * yieldAndTrack(
            currentBlockWithBitmap,
            bat.readUInt32BE(currentVhdBlockIndex * 4) * 512
          )
        }
        currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
        currentVhdBlockIndex = batIndex
      }
      const blockOffset = (next.offsetBytes / 512) % VHD_BLOCK_SIZE_SECTORS
      for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
        setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
      }
      next.data.copy(
        currentBlockWithBitmap,
        bitmapSize + next.offsetBytes % VHD_BLOCK_SIZE_BYTES
      )
    }
    yield * yieldAndTrack(currentBlockWithBitmap)
  }
  yield * yieldAndTrack(footer, 0)
  yield * yieldAndTrack(header, FOOTER_SIZE)
  yield * yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
  yield * generateFileContent(blockIterator, bitmapSize, ratio)
  yield * yieldAndTrack(footer)
})
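
A worked example of the ratio and bitmap arithmetic above, assuming 64 KiB input grains feeding 2 MiB VHD blocks (the constants are restated locally for clarity):

const VHD_BLOCK_SIZE_BYTES = 0x00200000 // 2 MiB, DEFAULT_BLOCK_SIZE
const SECTOR_SIZE = 512

const incomingBlockSize = 64 * 1024
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
console.log(ratio) // → 32: an integer <= 53, so the import is accepted

const sectors = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE // 4096 sectors per block
const bitmapSize = Math.ceil(sectors / 8 / SECTOR_SIZE) * SECTOR_SIZE
console.log(bitmapSize) // → 512: one sector of bitmap per block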

packages/vhd-lib/src/createSyntheticStream.js (new file, 153 lines)
@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'

import Vhd from './vhd'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
  HEADER_SIZE,
  SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'

const resolveRelativeFromFile = (file, path) =>
  resolve('/', dirname(file), path).slice(1)

export default asyncIteratorToStream(function * (handler, path) {
  const fds = []

  try {
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockAllocationTable()

      if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
        break
      }

      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length

    // this is the VHD we want to synthesize
    const vhd = vhds[0]

    // this is the root VHD
    const rootVhd = vhds[nVhds - 1]

    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: FOOTER_SIZE + HEADER_SIZE,
      parentTimestamp: rootVhd.header.parentTimestamp,
      parentUnicodeName: rootVhd.header.parentUnicodeName,
      parentUuid: rootVhd.header.parentUuid,
    }

    const bat = Buffer.allocUnsafe(vhd.batSize)
    let footer = {
      ...vhd.footer,
      dataOffset: FOOTER_SIZE,
      diskType: rootVhd.footer.diskType,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE

    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil(
          (header.tableOffset + bat.length) / SECTOR_SIZE
        );
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer

    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header

    yield bat

    // TODO: for generic usage the bitmap needs to be properly computed for each block
    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }

      yield bitmap

      const blocksByVhd = new Map()
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd === rootVhd
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
          return
        }
        while (i < n) {
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
    }

    yield footer
  } finally {
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})
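
A hedged usage sketch; the paths and handler URL are assumptions. The stream walks child.vhd up through its parents to the dynamic ancestor and emits a single, self-contained VHD merging all the layers.

import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
import createSyntheticStream from './createSyntheticStream'

const handler = getHandler({ url: 'file:///tmp/vhds' })
createSyntheticStream(handler, 'child.vhd').pipe(
  createWriteStream('flattened.vhd')
)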

packages/vhd-lib/src/index.js (new file, 8 lines)
@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
  default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
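
A sketch of how a consumer would import these entry points; the names come from the exports above and the package name from package.json, but the snippet itself is hypothetical:

import Vhd, { chainVhd, createSyntheticStream, mergeVhd } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'

const handler = getHandler({ url: 'file:///tmp/vhds' })
const vhd = new Vhd(handler, 'disk.vhd')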

packages/vhd-lib/src/merge.integ.spec.js (new file, 283 lines)
@@ -0,0 +1,283 @@
/* eslint-env jest */

import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'

import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'

const initialDir = process.cwd()

jest.setTimeout(60000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

async function createRandomFile (name, sizeMb) {
  await execa('bash', [
    '-c',
    `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
  ])
}

async function checkFile (vhdName) {
  await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}

async function recoverRawContent (vhdName, rawName, originalSize) {
  await checkFile(vhdName)
  await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdName, rawName])
  if (originalSize !== undefined) {
    await execa('truncate', ['-s', originalSize, rawName])
  }
}

async function convertFromRawToVhd (rawName, vhdName) {
  await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}

test('blocks can be moved', async () => {
  const initialSize = 4
  await createRandomFile('randomfile', initialSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd._freeFirstBlockSpace(8000000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const vhd = new Vhd(handler, 'empty.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  // we want bit 31 to be set, to prove it's not used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile('empty.vhd')
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)

  // we recover the data manually for speed reasons.
  // fs.write() with offset is way faster than qemu-img when there is a 1.5 TB
  // hole before the block of data
  const recoveredFile = await fs.open('recovered', 'w')
  try {
    const vhd2 = new Vhd(handler, 'empty.vhd')
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
      const entry = vhd._getBatEntry(i)
      if (entry !== 0xffffffff) {
        const block = (await vhd2._readBlock(i)).data
        await fs.write(
          recoveredFile,
          block,
          0,
          block.length,
          vhd2.header.blockSize * i
        )
      }
    }
  } finally {
    await fs.close(recoveredFile)
  }
  const recovered = await getStream.buffer(
    await fs.createReadStream('recovered', {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
    })
  )
  expect(recovered).toEqual(randomBuffer)
})

test('writeData on empty file', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(0, randomData)
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('writeData in 2 non-overlapping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const splitPointSectors = 2
  await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
  await newVhd.writeData(
    splitPointSectors,
    randomData.slice(splitPointSectors * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('writeData in 2 overlapping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const endFirstWrite = 3
  const startSecondWrite = 2
  await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
  await newVhd.writeData(
    startSecondWrite,
    randomData.slice(startSecondWrite * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('BAT can be extended and blocks moved', async () => {
  const initialSize = 4
  await createRandomFile('randomfile', initialSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works with empty parent files', async () => {
  const mbOfRandom = 2
  await createRandomFile('randomfile', mbOfRandom)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'empty.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works in normal cases', async () => {
  const mbOfRandom = 5
  await createRandomFile('randomfile', mbOfRandom)
  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'parent.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()

  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
  await checkFile('parent.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
  await checkFile('parent.vhd')
  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
  await checkFile('child2.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
  await checkFile('parent.vhd')
  await recoverRawContent(
    'parent.vhd',
    'recovered_from_coalescing',
    originalSize
  )
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
    await fs.readFile('randomfile2')
  )
})

test('createSyntheticStream passes vhd-util check', async () => {
  const initialSize = 4
  await createRandomFile('randomfile', initialSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createReadStream(handler, 'randomfile.vhd')
  await fromEvent(
    stream.pipe(await fs.createWriteStream('recovered.vhd')),
    'finish'
  )
  await checkFile('recovered.vhd')
  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})

packages/vhd-lib/src/merge.js (new file, 77 lines)
@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'

// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      const parentDiskType = parentVhd.footer.diskType
      assert(
        parentDiskType === DISK_TYPE_DIFFERENCING ||
          parentDiskType === DISK_TYPE_DYNAMIC
      )
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)

      // Read allocation table of child/parent.
      await Promise.all([
        parentVhd.readBlockAllocationTable(),
        childVhd.readBlockAllocationTable(),
      ])

      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }
})
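
A hedged usage sketch of the merge entry point; the file names are assumptions. Per the code above, it coalesces child.vhd into parent.vhd in place and resolves with the number of bytes of block data merged.

import { getHandler } from '@xen-orchestra/fs'
import merge from './merge'

const handler = getHandler({ url: 'file:///tmp/vhds' })
merge(handler, 'parent.vhd', handler, 'child.vhd').then(mergedDataSize =>
  console.log(`merged ${mergedDataSize} bytes`)
)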

packages/vhd-lib/src/vhd.integ.spec.js (new file, 134 lines)
@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'

import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('createFooter() does not crash', () => {
  createFooter(104448, Math.floor(Date.now() / 1000), {
    cylinders: 3,
    heads: 4,
    sectorsPerTrack: 17,
  })
})

test('ReadableRawVHDStream does not crash', async () => {
  const data = [
    {
      offsetBytes: 100,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 700,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  const fileSize = 1000
  const stream = createReadableRawVHDStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {
  const data = [
    {
      offsetBytes: 700,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 100,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  return expect(
    new Promise((resolve, reject) => {
      const stream = createReadableRawVHDStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
  ).rejects.toThrow('Received out of order blocks')
})

test('ReadableSparseVHDStream can handle a sparse file', async () => {
  const blockSize = Math.pow(2, 16)
  const blocks = [
    {
      offsetBytes: blockSize * 3,
      data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: blockSize * 100,
      data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
    },
  ]
  const fileSize = blockSize * 110
  const stream = createReadableSparseVHDStream(
    fileSize,
    blockSize,
    blocks.map(b => b.offsetBytes),
    blocks
  )
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
  ])
  const out1 = await readFile('out1.raw')
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)
  })
  await expect(out1.slice(0, expected.length)).toEqual(expected)
})

packages/vhd-lib/src/vhd.js (new file, 631 lines; listing truncated below)
@@ -0,0 +1,631 @@
import assert from 'assert'
|
||||
import getStream from 'get-stream'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import constantStream from './_constant-stream'
|
||||
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
|
||||
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
|
||||
import {
|
||||
BLOCK_UNUSED,
|
||||
DISK_TYPE_DIFFERENCING,
|
||||
DISK_TYPE_DYNAMIC,
|
||||
FILE_FORMAT_VERSION,
|
||||
FOOTER_COOKIE,
|
||||
FOOTER_SIZE,
|
||||
HEADER_COOKIE,
|
||||
HEADER_SIZE,
|
||||
HEADER_VERSION,
|
||||
PARENT_LOCATOR_ENTRIES,
|
||||
PLATFORM_NONE,
|
||||
PLATFORM_W2KU,
|
||||
SECTOR_SIZE,
|
||||
} from './_constants'
|
||||
|
||||
const VHD_UTIL_DEBUG = 0
|
||||
const debug = VHD_UTIL_DEBUG
|
||||
? str => console.log(`[vhd-merge]${str}`)
|
||||
: () => null
|
||||
|
||||
// ===================================================================
|
||||
//
|
||||
// Spec:
|
||||
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
|
||||
//
|
||||
// C implementation:
|
||||
// https://github.com/rubiojr/vhd-util-convert
|
||||
//
|
||||
// ===================================================================
|
||||
|
||||
const computeBatSize = entries =>
|
||||
sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
|
||||
|
||||
// Sectors conversions.
|
||||
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
|
||||
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
|
||||
|
||||
const assertChecksum = (name, buf, struct) => {
|
||||
const actual = unpackField(struct.fields.checksum, buf)
|
||||
const expected = checksumStruct(buf, struct)
|
||||
if (actual !== expected) {
|
||||
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
|
||||
}
|
||||
}
|
||||
|
||||
// unused block as buffer containing a uint32BE
|
||||
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
|
||||
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
|
||||
|
||||
// ===================================================================
|
||||
|
||||
// Format:
|
||||
//
|
||||
// 1. Footer (512)
|
||||
// 2. Header (1024)
|
||||
// 3. Unordered entries
|
||||
// - BAT (batSize @ header.tableOffset)
|
||||
// - Blocks (@ blockOffset(i))
|
||||
// - bitmap (blockBitmapSize)
|
||||
// - data (header.blockSize)
|
||||
// - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
|
||||
// 4. Footer (512 @ vhdSize - 512)
|
||||
//
|
||||
// Variables:
|
||||
//
|
||||
// - batSize = min(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
|
||||
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
|
||||
// - blockOffset(i) = bat[i] * sectorSize
|
||||
// - nBlocks = ceil(footer.currentSize / header.blockSize)
|
||||
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
|
||||
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
|
||||
// - sectorSize = 512
|
||||
|
||||
export default class Vhd {
|
||||
get batSize () {
|
||||
return computeBatSize(this.header.maxTableEntries)
|
||||
}
|
||||
|
||||
constructor (handler, path) {
|
||||
this._handler = handler
|
||||
this._path = path
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
// Read functions.
|
||||
// =================================================================
|
||||
|
||||
_readStream (start, n) {
|
||||
return this._handler.createReadStream(this._path, {
|
||||
start,
|
||||
end: start + n - 1, // end is inclusive
|
||||
})
|
||||
}
|
||||
|
||||
_read (start, n) {
|
||||
return this._readStream(start, n)
|
||||
.then(getStream.buffer)
|
||||
.then(buf => {
|
||||
assert.equal(buf.length, n)
|
||||
return buf
|
||||
})
|
||||
}
|
||||
|
||||
containsBlock (id) {
|
||||
return this._getBatEntry(id) !== BLOCK_UNUSED
|
||||
}
|
||||
|
||||
// Returns the first address after metadata. (In bytes)
|
||||
getEndOfHeaders () {
|
||||
const { header } = this
|
||||
|
||||
let end = FOOTER_SIZE + HEADER_SIZE
|
||||
|
||||
// Max(end, block allocation table end)
|
||||
end = Math.max(end, header.tableOffset + this.batSize)
|
||||
|
||||
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
|
||||
const entry = header.parentLocatorEntry[i]
|
||||
|
||||
if (entry.platformCode !== PLATFORM_NONE) {
|
||||
end = Math.max(
|
||||
end,
|
||||
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
debug(`End of headers: ${end}.`)
|
||||
|
||||
return end
|
||||
}
|
||||
|
||||
// Returns the first sector after data.
|
||||
getEndOfData () {
|
||||
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
|
||||
|
||||
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
|
||||
const { maxTableEntries } = this.header
|
||||
for (let i = 0; i < maxTableEntries; i++) {
|
||||
const blockAddr = this._getBatEntry(i)
|
||||
|
||||
if (blockAddr !== BLOCK_UNUSED) {
|
||||
end = Math.max(end, blockAddr + fullBlockSize)
|
||||
}
|
||||
}
|
||||
|
||||
debug(`End of data: ${end}.`)
|
||||
|
||||
return sectorsToBytes(end)
|
||||
}
|
||||
|
||||
// TODO: extract the checks into reusable functions:
|
||||
// - better human reporting
|
||||
// - auto repair if possible
|
||||
async readHeaderAndFooter (checkSecondFooter = true) {
|
||||
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
|
||||
const bufFooter = buf.slice(0, FOOTER_SIZE)
|
||||
const bufHeader = buf.slice(FOOTER_SIZE)
|
||||
|
||||
assertChecksum('footer', bufFooter, fuFooter)
|
||||
assertChecksum('header', bufHeader, fuHeader)
|
||||
|
||||
if (checkSecondFooter) {
|
||||
const size = await this._handler.getSize(this._path)
|
||||
assert(
|
||||
bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
|
||||
'footer1 !== footer2'
|
||||
)
|
||||
}
|
||||
|
||||
const footer = (this.footer = fuFooter.unpack(bufFooter))
|
||||
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
|
||||
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
|
||||
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
|
||||
assert(footer.originalSize <= footer.currentSize)
|
||||
assert(
|
||||
footer.diskType === DISK_TYPE_DIFFERENCING ||
|
||||
footer.diskType === DISK_TYPE_DYNAMIC
|
||||
)
|
||||
|
||||
const header = (this.header = fuHeader.unpack(bufHeader))
|
||||
assert.strictEqual(header.cookie, HEADER_COOKIE)
|
||||
assert.strictEqual(header.dataOffset, undefined)
|
||||
assert.strictEqual(header.headerVersion, HEADER_VERSION)
|
||||
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
|
||||
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
|
||||
|
||||
// Compute the number of sectors in one block.
|
||||
// Default: One block contains 4096 sectors of 512 bytes.
|
||||
const sectorsPerBlock = (this.sectorsPerBlock =
|
||||
header.blockSize / SECTOR_SIZE)
|
||||
|
||||
// Compute bitmap size in sectors.
|
||||
// Default: 1.
|
||||
const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
|
||||
sectorsPerBlock >> 3
|
||||
))
|
||||
|
||||
// Full block size => data block size + bitmap size.
|
||||
this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)
|
||||
|
||||
// In bytes.
|
||||
// Default: 512.
|
||||
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
|
||||
}
|
||||
|
||||
// Returns a buffer that contains the block allocation table of a vhd file.
|
||||
async readBlockAllocationTable () {
|
||||
const { header } = this
|
||||
this.blockTable = await this._read(
|
||||
header.tableOffset,
|
||||
header.maxTableEntries * 4
|
||||
)
|
||||
}
|
||||
|
||||
// return the first sector (bitmap) of a block
|
||||
_getBatEntry (block) {
|
||||
return this.blockTable.readUInt32BE(block * 4)
|
||||
}
|
||||
|
||||
_readBlock (blockId, onlyBitmap = false) {
|
||||
const blockAddr = this._getBatEntry(blockId)
|
||||
if (blockAddr === BLOCK_UNUSED) {
|
||||
throw new Error(`no such block ${blockId}`)
|
||||
}
|
||||
|
||||
return this._read(
|
||||
sectorsToBytes(blockAddr),
|
||||
onlyBitmap ? this.bitmapSize : this.fullBlockSize
|
||||
).then(
|
||||
buf =>
|
||||
onlyBitmap
|
||||
? { id: blockId, bitmap: buf }
|
||||
: {
|
||||
id: blockId,
|
||||
bitmap: buf.slice(0, this.bitmapSize),
|
||||
data: buf.slice(this.bitmapSize),
|
||||
buffer: buf,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
// get the identifiers and first sectors of the first and last block
|
||||
// in the file
|
||||
//
|
||||
_getFirstAndLastBlocks () {
|
||||
const n = this.header.maxTableEntries
|
||||
const bat = this.blockTable
|
||||
let i = 0
|
||||
let j = 0
|
||||
let first, firstSector, last, lastSector
|
||||
|
||||
// get first allocated block for initialization
|
||||
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
|
||||
i += 1
|
||||
j += 4
|
||||
|
||||
if (i === n) {
|
||||
const error = new Error('no allocated block found')
|
||||
error.noBlock = true
|
||||
throw error
|
||||
}
|
||||
}
|
||||
lastSector = firstSector
|
||||
first = last = i
|
||||
|
||||
while (i < n) {
|
||||
const sector = bat.readUInt32BE(j)
|
||||
if (sector !== BLOCK_UNUSED) {
|
||||
if (sector < firstSector) {
|
||||
first = i
|
||||
firstSector = sector
|
||||
} else if (sector > lastSector) {
|
||||
last = i
|
||||
lastSector = sector
|
||||
}
|
||||
}
|
||||
|
||||
i += 1
|
||||
j += 4
|
||||
}
|
||||
|
||||
return { first, firstSector, last, lastSector }
|
||||
}
|
||||
|
||||
// =================================================================
|
||||
// Write functions.
|
||||
// =================================================================
|
||||
|
||||
// Write a buffer/stream at a given position in a vhd file.
|
||||
async _write (data, offset) {
|
||||
debug(
|
||||
`_write offset=${offset} size=${
|
||||
Buffer.isBuffer(data) ? data.length : '???'
|
||||
}`
|
||||
)
|
||||
// TODO: could probably be merged in remote handlers.
|
||||
const stream = await this._handler.createOutputStream(this._path, {
|
||||
flags: 'r+',
|
||||
start: offset,
|
||||
})
|
||||
return Buffer.isBuffer(data)
|
||||
? new Promise((resolve, reject) => {
|
||||
stream.on('error', reject)
|
||||
stream.end(data, resolve)
|
||||
})
|
||||
: fromEvent(data.pipe(stream), 'finish')
|
||||
}
|
||||
|
||||
async _freeFirstBlockSpace (spaceNeededBytes) {
|
||||
try {
|
||||
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
|
||||
const tableOffset = this.header.tableOffset
|
||||
const { batSize } = this
|
||||
const newMinSector = Math.ceil(
|
||||
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
|
||||
)
|
||||
if (
|
||||
tableOffset + batSize + spaceNeededBytes >=
|
||||
sectorsToBytes(firstSector)
|
||||
) {
|
||||
const { fullBlockSize } = this
|
||||
const newFirstSector = Math.max(
|
||||
lastSector + fullBlockSize / SECTOR_SIZE,
|
||||
newMinSector
|
||||
)
|
||||
debug(
|
||||
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
|
||||
)
|
||||
// copy the first block at the end
|
||||
const stream = await this._readStream(
|
||||
sectorsToBytes(firstSector),
|
||||
fullBlockSize
|
||||
)
|
||||
await this._write(stream, sectorsToBytes(newFirstSector))
|
||||
await this._setBatEntry(first, newFirstSector)
|
||||
await this.writeFooter(true)
|
||||
spaceNeededBytes -= this.fullBlockSize
|
||||
if (spaceNeededBytes > 0) {
|
||||
return this._freeFirstBlockSpace(spaceNeededBytes)
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
if (!e.noBlock) {
|
||||
throw e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async ensureBatSize (entries) {
|
||||
const { header } = this
|
||||
const prevMaxTableEntries = header.maxTableEntries
|
||||
if (prevMaxTableEntries >= entries) {
|
||||
return
|
||||
}
|
||||
|
||||
const newBatSize = computeBatSize(entries)
|
||||
await this._freeFirstBlockSpace(newBatSize - this.batSize)
|
||||
const maxTableEntries = (header.maxTableEntries = entries)
|
||||
const prevBat = this.blockTable
|
||||
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
|
||||
prevBat.copy(bat)
|
||||
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
|
||||
debug(
|
||||
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
|
||||
)
|
||||
await this._write(
|
||||
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
|
||||
header.tableOffset + prevBat.length
|
||||
)
|
||||
await this.writeHeader()
|
||||
}
|
||||
|
||||
// set the first sector (bitmap) of a block
|
||||
_setBatEntry (block, blockSector) {
|
||||
const i = block * 4
|
||||
const { blockTable } = this
|
||||
|
||||
blockTable.writeUInt32BE(blockSector, i)
|
||||
|
||||
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
|
||||
}
|
||||
|
||||
  // Make a new empty block at vhd end.
  // Update block allocation table in context and in file.
  async createBlock (blockId) {
    const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)

    debug(`create block ${blockId} at ${blockAddr}`)

    await Promise.all([
      // Write an empty block and addr in vhd file.
      this._write(
        constantStream([0], this.fullBlockSize),
        sectorsToBytes(blockAddr)
      ),

      this._setBatEntry(blockId, blockAddr),
    ])

    return blockAddr
  }

  // Write a bitmap at a block address.
  async writeBlockBitmap (blockAddr, bitmap) {
    const { bitmapSize } = this

    if (bitmap.length !== bitmapSize) {
      throw new Error(`Bitmap length is not correct ! ${bitmap.length}`)
    }

    const offset = sectorsToBytes(blockAddr)

    debug(
      `Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
        'hex'
      )})`
    )
    await this._write(bitmap, sectorsToBytes(blockAddr))
  }

  async writeEntireBlock (block) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }
    await this._write(block.buffer, sectorsToBytes(blockAddr))
  }

  async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
      parentBitmap = Buffer.alloc(this.bitmapSize, 0)
    } else if (parentBitmap === undefined) {
      parentBitmap = (await this._readBlock(block.id, true)).bitmap
    }

    const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

    debug(
      `writeBlockSectors at ${offset} block=${
        block.id
      }, sectors=${beginSectorId}...${endSectorId}`
    )

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(parentBitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, parentBitmap)
    await this._write(
      block.data.slice(
        sectorsToBytes(beginSectorId),
        sectorsToBytes(endSectorId)
      ),
      sectorsToBytes(offset)
    )
  }
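
writeBlockSectors relies on the one-bit-per-sector helpers mapSetBit/mapTestBit. A sketch of the semantics assumed here (the real helpers live elsewhere in the package; MSB-first bit order within each byte is our assumption):

// One bit per sector: test and set a sector's dirty bit in a bitmap buffer.
const mapTestBit = (map, bit) => ((map[bit >> 3] >> (7 - (bit & 7))) & 1) !== 0
const mapSetBit = (map, bit) => {
  map[bit >> 3] |= 1 << (7 - (bit & 7))
}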
  async coalesceBlock (child, blockId) {
    const block = await child._readBlock(blockId)
    const { bitmap, data } = block

    debug(`coalesceBlock block=${blockId}`)

    // For each sector of block data...
    const { sectorsPerBlock } = child
    let parentBitmap = null
    for (let i = 0; i < sectorsPerBlock; i++) {
      // If no changes on one sector, skip.
      if (!mapTestBit(bitmap, i)) {
        continue
      }
      let endSector = i + 1

      // Count changed sectors.
      while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
        ++endSector
      }

      // Write n sectors into parent.
      debug(`coalesceBlock: write sectors=${i}...${endSector}`)

      const isFullBlock = i === 0 && endSector === sectorsPerBlock
      if (isFullBlock) {
        await this.writeEntireBlock(block)
      } else {
        if (parentBitmap === null) {
          parentBitmap = (await this._readBlock(blockId, true)).bitmap
        }
        await this.writeBlockSectors(block, i, endSector, parentBitmap)
      }

      i = endSector
    }

    // Return the merged data size
    return data.length
  }
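
The loop above is a run-length scan: consecutive dirty sectors are grouped so each run costs a single write into the parent. The same grouping as a standalone pure function (our own naming, for illustration only):

// Hypothetical helper: collect [start, end) runs of set bits,
// i.e. the grouping coalesceBlock performs inline.
const dirtyRuns = (bitmap, nBits) => {
  const runs = []
  for (let i = 0; i < nBits; i++) {
    if (!mapTestBit(bitmap, i)) continue
    let end = i + 1
    while (end < nBits && mapTestBit(bitmap, end)) ++end
    runs.push([i, end])
    i = end
  }
  return runs
}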
  // Write a context footer. (At the end and beginning of a vhd file.)
  async writeFooter (onlyEndFooter = false) {
    const { footer } = this

    const rawFooter = fuFooter.pack(footer)
    const eof = await this._handler.getSize(this._path)
    // sometimes the file is longer than anticipated, we still need to put the footer at the end
    const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(
      `Write footer at: ${offset} (checksum=${
        footer.checksum
      }). (data=${rawFooter.toString('hex')})`
    )
    if (!onlyEndFooter) {
      await this._write(rawFooter, 0)
    }
    await this._write(rawFooter, offset)
  }

  writeHeader () {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    const offset = FOOTER_SIZE
    debug(
      `Write header at: ${offset} (checksum=${
        header.checksum
      }). (data=${rawHeader.toString('hex')})`
    )
    return this._write(rawHeader, offset)
  }

  async writeData (offsetSectors, buffer) {
    const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
    const endBufferSectors = offsetSectors + bufferSizeSectors
    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
    await this.ensureBatSize(lastBlock)
    const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock

    for (
      let currentBlock = startBlock;
      currentBlock <= lastBlock;
      currentBlock++
    ) {
      const offsetInBlockSectors = Math.max(
        0,
        offsetSectors - currentBlock * this.sectorsPerBlock
      )
      const endInBlockSectors = Math.min(
        endBufferSectors - currentBlock * this.sectorsPerBlock,
        this.sectorsPerBlock
      )
      const startInBuffer = Math.max(
        0,
        (currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
      )
      const endInBuffer = Math.min(
        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
          SECTOR_SIZE,
        buffer.length
      )
      let inputBuffer
      if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
        inputBuffer = buffer.slice(startInBuffer, endInBuffer)
      } else {
        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
        buffer.copy(
          inputBuffer,
          offsetInBlockSectors * SECTOR_SIZE,
          startInBuffer,
          endInBuffer
        )
      }
      await this.writeBlockSectors(
        { id: currentBlock, data: inputBuffer },
        offsetInBlockSectors,
        endInBlockSectors
      )
    }
    await this.writeFooter()
  }
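
To make writeData's block arithmetic concrete, assume 512-byte sectors and sectorsPerBlock = 4096 (the usual 2 MiB VHD block); these values are illustrative, not from the source:

// A write of 1000 sectors at offsetSectors = 4000 spans two blocks:
const sectorsPerBlock = 4096
const offsetSectors = 4000
const endBufferSectors = offsetSectors + 1000 // 5000
const startBlock = Math.floor(offsetSectors / sectorsPerBlock) // 0
const lastBlock = Math.ceil(endBufferSectors / sectorsPerBlock) - 1 // 1
// block 0 receives its sectors 4000..4096, block 1 its sectors 0..904,
// so only block 0's tail and block 1's head are rewritten.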
  async ensureSpaceForParentLocators (neededSectors) {
    const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
    const currentSpace =
      Math.floor(this.header.tableOffset / SECTOR_SIZE) -
      firstLocatorOffset / SECTOR_SIZE
    if (currentSpace < neededSectors) {
      const deltaSectors = neededSectors - currentSpace
      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
      this.header.tableOffset += sectorsToBytes(deltaSectors)
      await this._write(this.blockTable, this.header.tableOffset)
    }
    return firstLocatorOffset
  }

  async setUniqueParentLocator (fileNameString) {
    const { header } = this
    header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
    const encodedFilename = Buffer.from(fileNameString, 'utf16le')
    const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
    const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
    await this._write(encodedFilename, position)
    header.parentLocatorEntry[0].platformDataSpace =
      dataSpaceSectors * SECTOR_SIZE
    header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
    header.parentLocatorEntry[0].platformDataOffset = position
    for (let i = 1; i < 8; i++) {
      header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
      header.parentLocatorEntry[i].platformDataSpace = 0
      header.parentLocatorEntry[i].platformDataLength = 0
      header.parentLocatorEntry[i].platformDataOffset = 0
    }
  }
}
@@ -1,6 +1,6 @@
 {
   "name": "xen-api",
-  "version": "0.16.6",
+  "version": "0.16.9",
   "license": "ISC",
   "description": "Connector to the Xen API",
   "keywords": [

@@ -143,7 +143,9 @@ export const isOpaqueRef = value =>
 
 const RE_READ_ONLY_METHOD = /^[^.]+\.get_/
 const isReadOnlyCall = (method, args) =>
-  args.length === 1 && isOpaqueRef(args[0]) && RE_READ_ONLY_METHOD.test(method)
+  args.length === 1 &&
+  typeof args[0] === 'string' &&
+  RE_READ_ONLY_METHOD.test(method)
 
 // Prepare values before passing them to the XenAPI:
 //

@@ -180,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
 
 // -------------------------------------------------------------------
 
-const getTaskResult = (task, onSuccess, onFailure) => {
+const getTaskResult = task => {
   const { status } = task
   if (status === 'cancelled') {
-    return [onFailure(new Cancel('task canceled'))]
+    return Promise.reject(new Cancel('task canceled'))
   }
   if (status === 'failure') {
-    return [onFailure(wrapError(task.error_info))]
+    return Promise.reject(wrapError(task.error_info))
   }
   if (status === 'success') {
     // the result might be:
     // - empty string
     // - an opaque reference
     // - an XML-RPC value
-    return [onSuccess(task.result)]
+    return Promise.resolve(task.result)
   }
 }
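
After this refactoring, getTaskResult no longer takes success/failure callbacks: a settled task maps directly to a resolved or rejected promise, and a pending task yields undefined. A usage sketch consistent with the call sites later in this diff:

// Settled task -> promise; pending task -> undefined, keep watching.
const result = getTaskResult(task)
if (result !== undefined) {
  return result
}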
@@ -244,7 +246,7 @@ export class Xapi extends EventEmitter {
     objects.getKey = getKey
 
     this._objectsByRefs = createObject(null)
-    this._objectsByRefs['OpaqueRef:NULL'] = null
+    this._objectsByRefs['OpaqueRef:NULL'] = undefined
 
     this._taskWatchers = Object.create(null)
 
@@ -407,15 +409,15 @@ export class Xapi extends EventEmitter {
     return this._readOnly && !isReadOnlyCall(method, args)
       ? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
       : this._sessionCall(`Async.${method}`, args).then(taskRef => {
-        $cancelToken.promise.then(() => {
-          // TODO: do not trigger if the task is already over
-          this._sessionCall('task.cancel', [taskRef]).catch(noop)
-        })
+          $cancelToken.promise.then(() => {
+            // TODO: do not trigger if the task is already over
+            this._sessionCall('task.cancel', [taskRef]).catch(noop)
+          })
 
-        return this.watchTask(taskRef)::lastly(() => {
-          this._sessionCall('task.destroy', [taskRef]).catch(noop)
-        })
+          return this.watchTask(taskRef)::lastly(() => {
+            this._sessionCall('task.destroy', [taskRef]).catch(noop)
+          })
         })
   }
 
   // create a task and automatically destroy it when settled
@@ -441,16 +443,18 @@ export class Xapi extends EventEmitter {
   // this lib), UUID (unique identifier that some objects have) or
   // opaque reference (internal to XAPI).
   getObject (idOrUuidOrRef, defaultValue) {
-    const object =
-      typeof idOrUuidOrRef === 'string'
-        ? this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
-        : this._objects.all[idOrUuidOrRef.$id]
+    if (typeof idOrUuidOrRef === 'object') {
+      idOrUuidOrRef = idOrUuidOrRef.$id
+    }
 
-    if (object) return object
+    const object =
+      this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
+
+    if (object !== undefined) return object
 
     if (arguments.length > 1) return defaultValue
 
-    throw new Error('there is not object can be matched to ' + idOrUuidOrRef)
+    throw new Error('no object with UUID or opaque ref: ' + idOrUuidOrRef)
   }
 
   // Returns the object for a given opaque reference (internal to

@@ -458,11 +462,11 @@ export class Xapi extends EventEmitter {
   getObjectByRef (ref, defaultValue) {
     const object = this._objectsByRefs[ref]
 
-    if (object) return object
+    if (object !== undefined) return object
 
     if (arguments.length > 1) return defaultValue
 
-    throw new Error('there is no object with the ref ' + ref)
+    throw new Error('no object with opaque ref: ' + ref)
   }
 
   // Returns the object for a given UUID (unique identifier that some

@@ -475,7 +479,7 @@ export class Xapi extends EventEmitter {
 
     if (arguments.length > 1) return defaultValue
 
-    throw new Error('there is no object with the UUID ' + uuid)
+    throw new Error('no object with UUID: ' + uuid)
   }
 
   getRecord (type, ref) {
@@ -575,31 +579,31 @@ export class Xapi extends EventEmitter {
     // redirection before consuming body
     const promise = isStream
       ? doRequest({
-        body: '',
+          body: '',
 
-        // omit task_id because this request will fail on purpose
-        query: 'task_id' in query ? omit(query, 'task_id') : query,
+          // omit task_id because this request will fail on purpose
+          query: 'task_id' in query ? omit(query, 'task_id') : query,
 
-        maxRedirects: 0,
-      }).then(
-        response => {
-          response.req.abort()
-          return doRequest()
-        },
-        error => {
-          let response
-          if (error != null && (response = error.response) != null) {
+          maxRedirects: 0,
+        }).then(
+          response => {
+            response.req.abort()
+            return doRequest()
+          },
+          error => {
+            let response
+            if (error != null && (response = error.response) != null) {
              response.req.abort()
 
-            const { headers: { location }, statusCode } = response
-            if (statusCode === 302 && location !== undefined) {
-              return doRequest(location)
+              const { headers: { location }, statusCode } = response
+              if (statusCode === 302 && location !== undefined) {
+                return doRequest(location)
+              }
             }
           }
 
-          throw error
-        }
-      )
+            throw error
+          }
+        )
       : doRequest()
 
     return promise.then(response => {
@@ -638,11 +642,11 @@ export class Xapi extends EventEmitter {
     let watcher = watchers[ref]
     if (watcher === undefined) {
       // sync check if the task is already settled
-      const task = this.objects.all[ref]
+      const task = this._objectsByRefs[ref]
       if (task !== undefined) {
-        const result = getTaskResult(task, Promise.resolve, Promise.reject)
-        if (result) {
-          return result[0]
+        const result = getTaskResult(task)
+        if (result !== undefined) {
+          return result
         }
       }
 
@@ -789,11 +793,12 @@ export class Xapi extends EventEmitter {
 
       const taskWatchers = this._taskWatchers
       const taskWatcher = taskWatchers[ref]
-      if (
-        taskWatcher !== undefined &&
-        getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
-      ) {
-        delete taskWatchers[ref]
+      if (taskWatcher !== undefined) {
+        const result = getTaskResult(object)
+        if (result !== undefined) {
+          taskWatcher.resolve(result)
+          delete taskWatchers[ref]
+        }
       }
     }
   }

@@ -813,7 +818,10 @@ export class Xapi extends EventEmitter {
     const taskWatchers = this._taskWatchers
     const taskWatcher = taskWatchers[ref]
     if (taskWatcher !== undefined) {
-      taskWatcher.reject(new Error('task has been destroyed before completion'))
+      const error = new Error('task has been destroyed before completion')
+      error.task = object
+      error.taskRef = ref
+      taskWatcher.reject(error)
       delete taskWatchers[ref]
     }
   }
 
@@ -1,6 +1,6 @@
 {
   "name": "xo-acl-resolver",
-  "version": "0.2.3",
+  "version": "0.2.4",
   "license": "ISC",
   "description": "Xen-Orchestra internal: do ACLs resolution",
   "keywords": [],

@@ -50,7 +50,9 @@ const checkAuthorizationByTypes = {
 
   network: or(checkSelf, checkMember('$pool')),
 
-  SR: or(checkSelf, checkMember('$pool')),
+  PIF: checkMember('$host'),
+
+  SR: or(checkSelf, checkMember('$container')),
 
   task: checkMember('$host'),
 
@@ -28,7 +28,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@babel/polyfill": "7.0.0-beta.40",
+    "@babel/polyfill": "7.0.0-beta.49",
     "bluebird": "^3.5.1",
     "chalk": "^2.2.0",
     "event-to-promise": "^0.8.0",

@@ -49,10 +49,10 @@
     "xo-lib": "^0.9.0"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
+    "@babel/preset-flow": "7.0.0-beta.49",
     "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"
packages/xo-collection/.babelrc.js (new file)
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
@@ -25,17 +25,16 @@
     "node": ">=4"
   },
   "dependencies": {
-    "babel-runtime": "^6.18.0",
+    "@babel/runtime": "^7.0.0-beta.49",
     "kindof": "^2.0.0",
     "lodash": "^4.17.2",
     "make-error": "^1.0.2"
   },
   "devDependencies": {
-    "babel-cli": "^6.24.1",
     "babel-plugin-lodash": "^3.3.2",
-    "babel-plugin-transform-runtime": "^6.23.0",
-    "babel-preset-env": "^1.5.2",
-    "babel-preset-stage-3": "^6.24.1",
+    "@babel/cli": "^7.0.0-beta.49",
+    "@babel/core": "^7.0.0-beta.49",
+    "@babel/plugin-transform-runtime": "^7.0.0-beta.49",
+    "@babel/preset-env": "^7.0.0-beta.49",
     "cross-env": "^5.1.3",
     "event-to-promise": "^0.8.0",
     "rimraf": "^2.6.1"

@@ -46,22 +45,5 @@
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build"
   },
-  "babel": {
-    "plugins": [
-      "lodash",
-      "transform-runtime"
-    ],
-    "presets": [
-      [
-        "env",
-        {
-          "targets": {
-            "node": 4
-          }
-        }
-      ],
-      "stage-3"
-    ]
-  }
 }
packages/xo-remote-parser/.babelrc.js (new file)
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
@@ -27,10 +27,10 @@
     "lodash": "^4.13.1"
   },
   "devDependencies": {
-    "babel-cli": "^6.24.1",
+    "@babel/cli": "^7.0.0-beta.49",
+    "@babel/core": "^7.0.0-beta.49",
+    "@babel/preset-env": "^7.0.0-beta.49",
     "babel-plugin-lodash": "^3.3.2",
-    "babel-preset-env": "^1.5.2",
-    "babel-preset-stage-3": "^6.24.1",
     "cross-env": "^5.1.3",
     "deep-freeze": "^0.0.1",
     "rimraf": "^2.6.1"

@@ -40,23 +40,6 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
-  },
-  "babel": {
-    "plugins": [
-      "lodash"
-    ],
-    "presets": [
-      [
-        "env",
-        {
-          "targets": {
-            "browsers": "> 5%",
-            "node": 4
-          }
-        }
-      ],
-      "stage-3"
-    ]
+    "prepare": "yarn run build"
   }
 }
@@ -1,6 +1,6 @@
 {
   "name": "xo-server-auth-saml",
-  "version": "0.5.0",
+  "version": "0.5.2",
   "license": "AGPL-3.0",
   "description": "SAML authentication plugin for XO-Server",
   "keywords": [

@@ -3,22 +3,32 @@ import { Strategy } from 'passport-saml'
 // ===================================================================
 
 export const configurationSchema = {
+  description:
+    'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
   type: 'object',
   properties: {
     cert: {
+      title: 'Certificate',
      description: "Copy/paste the identity provider's certificate",
      type: 'string',
    },
    entryPoint: {
+      title: 'Entry point',
      description: 'Entry point of the identity provider',
      type: 'string',
    },
    issuer: {
+      title: 'Issuer',
      description: 'Issuer string to supply to the identity provider',
      type: 'string',
    },
    usernameField: {
+      title: 'Username field',
      description: 'Field to use as the XO username',
      type: 'string',
    },
  },
-  required: ['cert', 'entryPoint', 'issuer'],
+  required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
 }
 
 // ===================================================================

@@ -42,6 +52,7 @@ class AuthSamlXoPlugin {
       new Strategy(this._conf, async (profile, done) => {
         const name = profile[this._usernameField]
         if (!name) {
+          console.warn('xo-server-auth-saml:', profile)
           done('no name found for this user')
           return
         }
@@ -1,6 +1,6 @@
 {
   "name": "xo-server-backup-reports",
-  "version": "0.10.0",
+  "version": "0.11.0",
   "license": "AGPL-3.0",
   "description": "Backup reports plugin for XO-Server",
   "keywords": [

@@ -35,6 +35,7 @@
     "node": ">=4"
   },
   "dependencies": {
+    "babel-runtime": "^6.26.0",
     "human-format": "^0.10.0",
     "lodash": "^4.13.1",
     "moment-timezone": "^0.5.13"

@@ -42,6 +43,7 @@
   "devDependencies": {
     "babel-cli": "^6.24.1",
     "babel-plugin-lodash": "^3.3.2",
+    "babel-plugin-transform-runtime": "^6.23.0",
     "babel-preset-env": "^1.5.2",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.1"

@@ -56,7 +58,8 @@
   },
   "babel": {
     "plugins": [
-      "lodash"
+      "lodash",
+      "transform-runtime"
     ],
     "presets": [
       [

@@ -1,7 +1,6 @@
 import humanFormat from 'human-format'
 import moment from 'moment-timezone'
-import { forEach, startCase } from 'lodash'
-
+import { forEach, get, startCase } from 'lodash'
 import pkg from '../package'
 
 export const configurationSchema = {

@@ -37,13 +36,19 @@ const ICON_FAILURE = '🚨'
 const ICON_SKIPPED = '⏩'
 const ICON_SUCCESS = '✔'
 
+const STATUS_ICON = {
+  skipped: ICON_SKIPPED,
+  success: ICON_SUCCESS,
+  failure: ICON_FAILURE,
+}
+
 const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
 const createDateFormater = timezone =>
   timezone !== undefined
     ? timestamp =>
-      moment(timestamp)
-        .tz(timezone)
-        .format(DATE_FORMAT)
+        moment(timestamp)
+          .tz(timezone)
+          .format(DATE_FORMAT)
     : timestamp => moment(timestamp).format(DATE_FORMAT)
 
 const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
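
Usage sketch for the two formatters above (sample values, not from the source; createDateFormater falls back to the server's local zone when timezone is undefined):

const formatDate = createDateFormater('Europe/Paris')
formatDate(Date.now()) // e.g. 'Saturday, May 19th 2018, 7:00:00 pm'
formatDuration(3600e3) // 'an hour'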
@@ -66,6 +71,7 @@ const logError = e => {
   console.error('backup report error:', e)
 }
 
+const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
 const NO_SUCH_OBJECT_ERROR = 'no such object'
 const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
 const UNHEALTHY_VDI_CHAIN_MESSAGE =
@@ -94,14 +100,352 @@ class BackupReportsXoPlugin {
     this._xo.removeListener('job:terminated', this._report)
   }
 
-  _wrapper (status) {
-    return new Promise(resolve => resolve(this._listener(status))).catch(
-      logError
-    )
+  _wrapper (status, job, schedule, runJobId) {
+    return new Promise(resolve =>
+      resolve(
+        job.type === 'backup'
+          ? this._backupNgListener(status, job, schedule, runJobId)
+          : this._listener(status, job, schedule, runJobId)
+      )
+    ).catch(logError)
   }
 
+  async _backupNgListener (_1, _2, { timezone }, runJobId) {
+    const xo = this._xo
+    const log = await xo.getBackupNgLogs(runJobId)
+
+    const { reportWhen, mode } = log.data || {}
+    if (reportWhen === 'never') {
+      return
+    }
+
+    const formatDate = createDateFormater(timezone)
+
+    if (log.status === 'success' && reportWhen === 'failure') {
+      return
+    }
+
+    const jobName = (await xo.getJob(log.jobId, 'backup')).name
+    if (log.result !== undefined) {
+      let markdown = [
+        `## Global status: ${log.status}`,
+        '',
+        `- **mode**: ${mode}`,
+        `- **Start time**: ${formatDate(log.start)}`,
+        `- **End time**: ${formatDate(log.end)}`,
+        `- **Duration**: ${formatDuration(log.end - log.start)}`,
+        `- **Error**: ${log.result.message}`,
+        '---',
+        '',
+        `*${pkg.name} v${pkg.version}*`,
+      ]
+
+      markdown = markdown.join('\n')
+      return this._sendReport({
+        subject: `[Xen Orchestra] ${
+          log.status
+        } − Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
+        markdown,
+        nagiosStatus: 2,
+        nagiosMarkdown: `[Xen Orchestra] [${
+          log.status
+        }] Backup report for ${jobName} - Error : ${log.result.message}`,
+      })
+    }
+
+    const failedVmsText = []
+    const skippedVmsText = []
+    const successfulVmsText = []
+    const nagiosText = []
+
+    let globalMergeSize = 0
+    let globalTransferSize = 0
+    let nFailures = 0
+    let nSkipped = 0
+    for (const taskLog of log.tasks) {
+      if (taskLog.status === 'success' && reportWhen === 'failure') {
+        return
+      }
+
+      const vmId = taskLog.data.id
+      let vm
+      try {
+        vm = xo.getObject(vmId)
+      } catch (e) {}
+      const text = [
+        `### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
+        '',
+        `- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
+        `- **Start time**: ${formatDate(taskLog.start)}`,
+        `- **End time**: ${formatDate(taskLog.end)}`,
+        `- **Duration**: ${formatDuration(taskLog.end - taskLog.start)}`,
+      ]
+
+      const failedSubTasks = []
+      const snapshotText = []
+      const srsText = []
+      const remotesText = []
+
+      for (const subTaskLog of taskLog.tasks || []) {
+        const icon = STATUS_ICON[subTaskLog.status]
+        const errorMessage = `  - **Error**: ${get(
+          subTaskLog.result,
+          'message'
+        )}`
+
+        if (subTaskLog.message === 'snapshot') {
+          snapshotText.push(
+            `- **Snapshot** ${icon}`,
+            `  - **Start time**: ${formatDate(subTaskLog.start)}`,
+            `  - **End time**: ${formatDate(subTaskLog.end)}`
+          )
+        } else if (subTaskLog.data.type === 'remote') {
+          const id = subTaskLog.data.id
+          const remote = await xo.getRemote(id).catch(() => {})
+          remotesText.push(
+            `- **${
+              remote !== undefined ? remote.name : `Remote Not found`
+            }** (${id}) ${icon}`,
+            `  - **Start time**: ${formatDate(subTaskLog.start)}`,
+            `  - **End time**: ${formatDate(subTaskLog.end)}`,
+            `  - **Duration**: ${formatDuration(
+              subTaskLog.end - subTaskLog.start
+            )}`
+          )
+          if (subTaskLog.status === 'failure') {
+            failedSubTasks.push(remote !== undefined ? remote.name : id)
+            remotesText.push('', errorMessage)
+          }
+        } else {
+          const id = subTaskLog.data.id
+          let sr
+          try {
+            sr = xo.getObject(id)
+          } catch (e) {}
+          const [srName, srUuid] =
+            sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
+          srsText.push(
+            `- **${srName}** (${srUuid}) ${icon}`,
+            `  - **Start time**: ${formatDate(subTaskLog.start)}`,
+            `  - **End time**: ${formatDate(subTaskLog.end)}`,
+            `  - **Duration**: ${formatDuration(
+              subTaskLog.end - subTaskLog.start
+            )}`
+          )
+          if (subTaskLog.status === 'failure') {
+            failedSubTasks.push(sr !== undefined ? sr.name_label : id)
+            srsText.push('', errorMessage)
+          }
+        }
+
+        forEach(subTaskLog.tasks, operationLog => {
+          const size = operationLog.result.size
+          if (operationLog.message === 'merge') {
+            globalMergeSize += size
+          } else {
+            globalTransferSize += size
+          }
+          const operationText = [
+            `  - **${operationLog.message}** ${
+              STATUS_ICON[operationLog.status]
+            }`,
+            `    - **Start time**: ${formatDate(operationLog.start)}`,
+            `    - **End time**: ${formatDate(operationLog.end)}`,
+            `    - **Duration**: ${formatDuration(
+              operationLog.end - operationLog.start
+            )}`,
+            operationLog.status === 'failure'
+              ? `- **Error**: ${get(operationLog.result, 'message')}`
+              : `    - **Size**: ${formatSize(size)}`,
+            `    - **Speed**: ${formatSpeed(
+              size,
+              operationLog.end - operationLog.start
+            )}`,
+          ].join('\n')
+          if (get(subTaskLog, 'data.type') === 'remote') {
+            remotesText.push(operationText)
+            remotesText.join('\n')
+          }
+          if (get(subTaskLog, 'data.type') === 'SR') {
+            srsText.push(operationText)
+            srsText.join('\n')
+          }
+        })
+      }
+
+      if (srsText.length !== 0) {
+        srsText.unshift(`- **SRs**`)
+      }
+      if (remotesText.length !== 0) {
+        remotesText.unshift(`- **Remotes**`)
+      }
+      const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
+      if (taskLog.result !== undefined) {
+        if (taskLog.status === 'skipped') {
+          ++nSkipped
+          skippedVmsText.push(
+            ...text,
+            `- **Reason**: ${
+              taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
+                ? UNHEALTHY_VDI_CHAIN_MESSAGE
+                : taskLog.result.message
+            }`,
+            ''
+          )
+          nagiosText.push(
+            `[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
+              taskLog.result.message
+            } ]`
+          )
+        } else {
+          ++nFailures
+          failedVmsText.push(
+            ...text,
+            `- **Error**: ${taskLog.result.message}`,
+            ''
+          )
+
+          nagiosText.push(
+            `[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
+              taskLog.result.message
+            } ]`
+          )
+        }
+      } else {
+        if (taskLog.status === 'failure') {
+          ++nFailures
+          failedVmsText.push(...text, '', '', ...subText, '')
+          nagiosText.push(
+            `[(Failed) ${
+              vm !== undefined ? vm.name_label : 'undefined'
+            }: (failed)[${failedSubTasks.toString()}]]`
+          )
+        } else {
+          successfulVmsText.push(...text, '', '', ...subText, '')
+        }
+      }
+    }
+
+    const nVms = log.tasks.length
+    const nSuccesses = nVms - nFailures - nSkipped
+    let markdown = [
+      `## Global status: ${log.status}`,
+      '',
+      `- **mode**: ${mode}`,
+      `- **Start time**: ${formatDate(log.start)}`,
+      `- **End time**: ${formatDate(log.end)}`,
+      `- **Duration**: ${formatDuration(log.end - log.start)}`,
+      `- **Successes**: ${nSuccesses} / ${nVms}`,
+    ]
+
+    if (globalTransferSize !== 0) {
+      markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
+    }
+    if (globalMergeSize !== 0) {
+      markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
+    }
+    markdown.push('')
+
+    if (nFailures !== 0) {
+      markdown.push(
+        '---',
+        '',
+        `## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
+        '',
+        ...failedVmsText
+      )
+    }
+
+    if (nSkipped !== 0) {
+      markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
+    }
+
+    if (nSuccesses !== 0 && reportWhen !== 'failure') {
+      markdown.push(
+        '---',
+        '',
+        `## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
+        '',
+        ...successfulVmsText
+      )
+    }
+
+    markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
+    markdown = markdown.join('\n')
+    return this._sendReport({
+      markdown,
+      subject: `[Xen Orchestra] ${log.status} − Backup report for ${jobName} ${
+        STATUS_ICON[log.status]
+      }`,
+      nagiosStatus: log.status === 'success' ? 0 : 2,
+      nagiosMarkdown:
+        log.status === 'success'
+          ? `[Xen Orchestra] [Success] Backup report for ${jobName}`
+          : `[Xen Orchestra] [${
+              nFailures !== 0 ? 'Failure' : 'Skipped'
+            }] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
+    })
+  }
+
+  _sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
+    const xo = this._xo
+    return Promise.all([
+      xo.sendEmail !== undefined &&
+        xo.sendEmail({
+          to: this._mailsReceivers,
+          subject,
+          markdown,
+        }),
+      xo.sendToXmppClient !== undefined &&
+        xo.sendToXmppClient({
+          to: this._xmppReceivers,
+          message: markdown,
+        }),
+      xo.sendSlackMessage !== undefined &&
+        xo.sendSlackMessage({
+          message: markdown,
+        }),
+      xo.sendPassiveCheck !== undefined &&
+        xo.sendPassiveCheck({
+          nagiosStatus,
+          message: nagiosMarkdown,
+        }),
+    ])
+  }
+
   _listener (status) {
-    const { calls } = status
+    const { calls, timezone, error } = status
+    const formatDate = createDateFormater(timezone)
+
+    if (status.error !== undefined) {
+      const [globalStatus, icon] =
+        error.message === NO_VMS_MATCH_THIS_PATTERN
+          ? ['Skipped', ICON_SKIPPED]
+          : ['Failure', ICON_FAILURE]
+
+      let markdown = [
+        `## Global status: ${globalStatus}`,
+        '',
+        `- **Start time**: ${formatDate(status.start)}`,
+        `- **End time**: ${formatDate(status.end)}`,
+        `- **Duration**: ${formatDuration(status.end - status.start)}`,
+        `- **Error**: ${error.message}`,
+        '---',
+        '',
+        `*${pkg.name} v${pkg.version}*`,
+      ]
+
+      markdown = markdown.join('\n')
+      return this._sendReport({
+        subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
+        markdown,
+        nagiosStatus: 2,
+        nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
+          error.message
+        }`,
+      })
+    }
+
     const callIds = Object.keys(calls)
 
     const nCalls = callIds.length
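
Note the pattern in _sendReport above: every transport (email, XMPP, Slack, Nagios passive check) is optional, guarded by an `!== undefined` check, and the resulting `false` entries pass through Promise.all untouched, since non-thenable values resolve as-is. The same pattern in isolation (a sketch; transport names and signatures taken from this diff, the notifyAll helper is ours):

// Optional fan-out: only the transports the server exposes are called.
const notifyAll = (xo, markdown) =>
  Promise.all([
    xo.sendSlackMessage !== undefined &&
      xo.sendSlackMessage({ message: markdown }),
    xo.sendPassiveCheck !== undefined &&
      xo.sendPassiveCheck({ nagiosStatus: 0, message: markdown }),
  ])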
@@ -139,8 +483,6 @@ class BackupReportsXoPlugin {
     const skippedBackupsText = []
     const successfulBackupText = []
 
-    const formatDate = createDateFormater(status.timezone)
-
     forEach(calls, call => {
       const { id = call.params.vm } = call.params
 

@@ -226,20 +568,21 @@ class BackupReportsXoPlugin {
       return
     }
 
-    const { end, start } = status
     const { tag } = oneCall.params
-    const duration = end - start
+    const duration = status.end - status.start
     const nSuccesses = nCalls - nFailures - nSkipped
     const globalStatus = globalSuccess
       ? `Success`
-      : nFailures !== 0 ? `Failure` : `Skipped`
+      : nFailures !== 0
+        ? `Failure`
+        : `Skipped`
 
     let markdown = [
       `## Global status: ${globalStatus}`,
       '',
       `- **Type**: ${formatMethod(method)}`,
-      `- **Start time**: ${formatDate(start)}`,
-      `- **End time**: ${formatDate(end)}`,
+      `- **Start time**: ${formatDate(status.start)}`,
+      `- **End time**: ${formatDate(status.end)}`,
       `- **Duration**: ${formatDuration(duration)}`,
       `- **Successes**: ${nSuccesses} / ${nCalls}`,
     ]

@@ -285,37 +628,22 @@ class BackupReportsXoPlugin {
 
     markdown = markdown.join('\n')
 
-    const xo = this._xo
-    return Promise.all([
-      xo.sendEmail !== undefined &&
-        xo.sendEmail({
-          to: this._mailsReceivers,
-          subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
-            globalSuccess
-              ? ICON_SUCCESS
-              : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
-          }`,
-          markdown,
-        }),
-      xo.sendToXmppClient !== undefined &&
-        xo.sendToXmppClient({
-          to: this._xmppReceivers,
-          message: markdown,
-        }),
-      xo.sendSlackMessage !== undefined &&
-        xo.sendSlackMessage({
-          message: markdown,
-        }),
-      xo.sendPassiveCheck !== undefined &&
-        xo.sendPassiveCheck({
-          status: globalSuccess ? 0 : 2,
-          message: globalSuccess
-            ? `[Xen Orchestra] [Success] Backup report for ${tag}`
-            : `[Xen Orchestra] [${
-                nFailures !== 0 ? 'Failure' : 'Skipped'
-              }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
-        }),
-    ])
+    return this._sendReport({
+      markdown,
+      subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
+        globalSuccess
+          ? ICON_SUCCESS
+          : nFailures !== 0
+            ? ICON_FAILURE
+            : ICON_SKIPPED
+      }`,
+      nagiosStatus: globalSuccess ? 0 : 2,
+      nagiosMarkdown: globalSuccess
+        ? `[Xen Orchestra] [Success] Backup report for ${tag}`
+        : `[Xen Orchestra] [${
+            nFailures !== 0 ? 'Failure' : 'Skipped'
+          }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
+    })
   }
 }
@@ -128,10 +128,15 @@ class XoServerCloud {
       throw new Error(`cannot get resource: ${namespace} not registered`)
     }
 
-    const namespaceCatalog = await this._getNamespaceCatalog(namespace)
+    const { _token: token } = await this._getNamespaceCatalog(namespace)
+
+    // 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
+    if (token === undefined) {
+      throw new Error(`${namespace} namespace token is undefined`)
+    }
 
     const downloadToken = await this._updater.call('getResourceDownloadToken', {
-      token: namespaceCatalog._token,
+      token,
       id,
       version,
     })
 

@@ -30,7 +30,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.2",
+    "@xen-orchestra/cron": "^1.0.3",
     "babel-runtime": "^6.11.6",
     "lodash": "^4.16.2"
   },
@@ -1,6 +1,6 @@
 {
   "name": "xo-server-perf-alert",
-  "version": "0.0.0",
+  "version": "0.1.0",
   "license": "AGPL-3.0",
   "description": "",
   "keywords": [],

@@ -20,16 +20,16 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.2",
+    "@xen-orchestra/cron": "^1.0.3",
     "d3-time-format": "^2.1.1",
-    "json5": "^0.5.1",
+    "json5": "^1.0.0",
     "lodash": "^4.17.4"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "^7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.49",
+    "@babel/core": "7.0.0-beta.49",
+    "@babel/preset-env": "7.0.0-beta.49",
+    "@babel/preset-flow": "^7.0.0-beta.49",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
@@ -1,10 +1,11 @@
 import JSON5 from 'json5'
 import { createSchedule } from '@xen-orchestra/cron'
-import { forOwn, map, mean } from 'lodash'
+import { assign, forOwn, map, mean } from 'lodash'
 import { utcParse } from 'd3-time-format'
 
 const VM_FUNCTIONS = {
   cpuUsage: {
+    name: 'VM CPU usage',
     description:
       'Raises an alarm when the average usage of any CPU is higher than the threshold',
     unit: '%',

@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
     },
   },
   memoryUsage: {
+    name: 'VM memory usage',
     description:
       'Raises an alarm when the used memory % is higher than the threshold',
     unit: '% used',

@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {
 
 const HOST_FUNCTIONS = {
   cpuUsage: {
+    name: 'host CPU usage',
     description:
       'Raises an alarm when the average usage of any CPU is higher than the threshold',
     unit: '%',

@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
     },
   },
   memoryUsage: {
+    name: 'host memory usage',
     description:
       'Raises an alarm when the used memory % is higher than the threshold',
     unit: '% used',

@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
         )
       },
       getDisplayableValue,
-      shouldAlarm: () => {
-        return getDisplayableValue() > threshold
-      },
+      shouldAlarm: () => getDisplayableValue() > threshold,
       }
     },
   },
 }
 
+const SR_FUNCTIONS = {
+  storageUsage: {
+    name: 'SR storage usage',
+    description:
+      'Raises an alarm when the used disk space % is higher than the threshold',
+    unit: '% used',
+    comparator: '>',
+    createGetter: threshold => sr => {
+      const getDisplayableValue = () =>
+        sr.physical_utilisation * 100 / sr.physical_size
+      return {
+        getDisplayableValue,
+        shouldAlarm: () => getDisplayableValue() > threshold,
+      }
+    },
+  },
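
A quick numeric check of the SR getter just added (sample values, not from the source): with physical_size = 2 TiB and physical_utilisation = 1.7 TiB the getter reports 85 % used, so shouldAlarm fires at the default threshold of 80:

const { getDisplayableValue, shouldAlarm } =
  SR_FUNCTIONS.storageUsage.createGetter(80)({
    physical_size: 2 * 1024 ** 4, // 2 TiB
    physical_utilisation: 1.7 * 1024 ** 4, // 1.7 TiB
  })
getDisplayableValue() // 85
shouldAlarm() // true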
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
 const TYPE_FUNCTION_MAP = {
   vm: VM_FUNCTIONS,
   host: HOST_FUNCTIONS,
+  sr: SR_FUNCTIONS,
 }
 
 // list of currently ringing alarms, to avoid double notification

@@ -229,11 +250,52 @@ export const configurationSchema = {
         required: ['uuids'],
       },
     },
+    srMonitors: {
+      type: 'array',
+      title: 'SR Monitors',
+      description:
+        'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
+        'The Average is compared to the threshold and an alarm is raised upon crossing',
+      items: {
+        type: 'object',
+        properties: {
+          uuids: {
+            title: 'SRs',
+            type: 'array',
+            items: {
+              type: 'string',
+              $type: 'SR',
+            },
+          },
+          variableName: {
+            title: 'Alarm Type',
+            description: Object.keys(SR_FUNCTIONS)
+              .map(
+                k =>
+                  ` * ${k} (${SR_FUNCTIONS[k].unit}): ${
+                    SR_FUNCTIONS[k].description
+                  }`
+              )
+              .join('\n'),
+            type: 'string',
+            default: Object.keys(SR_FUNCTIONS)[0],
+            enum: Object.keys(SR_FUNCTIONS),
+          },
+          alarmTriggerLevel: {
+            title: 'Threshold',
+            description:
+              'The direction of the crossing is given by the Alarm type',
+            type: 'number',
+            default: 80,
+          },
+        },
+        required: ['uuids'],
+      },
+    },
     toEmails: {
       type: 'array',
       title: 'Email addresses',
       description: 'Email addresses of the alert recipients',
 
       items: {
         type: 'string',
       },

@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
       currentAlarms[alarmId] = true
       raiseCallback(alarmId)
     }
-  } else {
-    if (current) {
-      try {
-        lowerCallback(alarmId)
-      } finally {
-        delete currentAlarms[alarmId]
-      }
+  } else if (current) {
+    try {
+      lowerCallback(alarmId)
+    } finally {
+      delete currentAlarms[alarmId]
     }
   }
 }
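
raiseOrLowerAlarm is edge-triggered: raiseCallback fires only on a false-to-true transition of the condition and lowerCallback only on true-to-false, with the module-level currentAlarms map as the memory in between. A usage sketch matching the call sites further down (the condition and callbacks are illustrative, not from the source):

raiseOrLowerAlarm(
  'host|cpuUsage|90|some-uuid',
  currentCpuUsage > 90, // hypothetical sampled condition
  alarmId => console.log('raising alarm', alarmId),
  alarmId => console.log('lowering alarm', alarmId)
)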
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
     clearCurrentAlarms()
   }
 
+  load () {
+    this._job.start()
+  }
+
+  unload () {
+    this._job.stop()
+  }
+
   _generateUrl (type, object) {
-    const map = {
-      vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
-      host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
+    const { baseUrl } = this._configuration
+    const { uuid } = object
+    switch (type) {
+      case 'vm':
+        return `${baseUrl}#/vms/${uuid}/stats`
+      case 'host':
+        return `${baseUrl}#/hosts/${uuid}/stats`
+      case 'sr':
+        return `${baseUrl}#/srs/${uuid}/general`
+      default:
+        return 'unknown type'
     }
-    return map[type]()
   }
 
   async test () {
-    const hostMonitorPart2 = await Promise.all(
-      map(this._getMonitors(), async m => {
-        const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
-        return `
+    const monitorBodies = await Promise.all(
+      map(
+        this._getMonitors(),
+        async m => `
 ## Monitor for ${m.title}
 
-${m.tableHeader}
-${tableBody.join('')}`
-      })
+${(await m.snapshot()).map(entry => entry.listItem).join('')}`
+      )
     )
 
     this._sendAlertEmail(

@@ -322,18 +396,10 @@ ${tableBody.join('')}`
       `
 # Performance Alert Test
 Your alarms and their current status:
-${hostMonitorPart2.join('\n')}`
+${monitorBodies.join('\n')}`
     )
   }
 
-  load () {
-    this._job.start()
-  }
-
-  unload () {
-    this._job.stop()
-  }
-
   _parseDefinition (definition) {
     const alarmId = `${definition.objectType}|${definition.variableName}|${
       definition.alarmTriggerLevel

@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
       definition.alarmTriggerPeriod !== undefined
         ? definition.alarmTriggerPeriod
         : 60
-    const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
     return {
       ...definition,
       alarmId,
       vmFunction: typeFunction,
-      title: `${typeText} ${definition.variableName} ${
-        typeFunction.comparator
-      } ${definition.alarmTriggerLevel}${typeFunction.unit}`,
-      tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
+      title: `${typeFunction.name} ${typeFunction.comparator} ${
+        definition.alarmTriggerLevel
+      }${typeFunction.unit}`,
       snapshot: async () => {
         return Promise.all(
           map(definition.uuids, async uuid => {
             try {
-              const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
-              const objectLink = `[${
-                monitoredObject.name_label
-              }](${this._generateUrl(definition.objectType, monitoredObject)})`
-              const rrd = await this.getRrd(monitoredObject, observationPeriod)
-              const couldFindRRD = rrd !== null
               const result = {
-                object: monitoredObject,
-                couldFindRRD,
-                objectLink: objectLink,
-                listItem: ` * ${typeText} ${objectLink} ${
-                  definition.variableName
-                }: **Can't read performance counters**\n`,
-                tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
+                uuid,
+                name: definition.name,
+                object: this._xo.getXapi(uuid).getObject(uuid),
               }
-              if (!couldFindRRD) {
-                return result
+
+              if (result.object === undefined) {
+                throw new Error('object not found')
               }
-              const data = parseData(rrd, monitoredObject.uuid)
-              const textValue =
-                data.getDisplayableValue().toFixed(1) + typeFunction.unit
-              const shouldAlarm = data.shouldAlarm()
-              return {
-                ...result,
-                value: data.getDisplayableValue(),
-                shouldAlarm: shouldAlarm,
-                textValue: textValue,
-                listItem: ` * ${typeText} ${objectLink} ${
-                  definition.variableName
-                }: ${textValue}\n`,
-                tableItem: `${objectLink} | ${textValue} | ${
-                  shouldAlarm ? '**Alert Ongoing**' : 'no alert'
-                }\n`,
+
+              result.objectLink = `[${
+                result.object.name_label
+              }](${this._generateUrl(definition.objectType, result.object)})`
+
+              if (typeFunction.createGetter === undefined) {
+                // Stats via RRD
+                result.rrd = await this.getRrd(result.object, observationPeriod)
+                if (result.rrd !== null) {
+                  const data = parseData(result.rrd, result.object.uuid)
+                  assign(result, {
+                    data,
+                    value: data.getDisplayableValue(),
+                    shouldAlarm: data.shouldAlarm(),
+                  })
+                }
+              } else {
+                // Stats via XAPI
+                const getter = typeFunction.createGetter(
+                  definition.alarmTriggerLevel
+                )
+                const data = getter(result.object)
+                assign(result, {
+                  value: data.getDisplayableValue(),
+                  shouldAlarm: data.shouldAlarm(),
+                })
               }
+
+              result.listItem = ` * ${result.objectLink}: ${
+                result.value === undefined
+                  ? "**Can't read performance counters**"
+                  : result.value.toFixed(1) + typeFunction.unit
+              }\n`
+
+              return result
             } catch (_) {
               return {
                 uuid,
                 object: null,
-                couldFindRRD: false,
                 objectLink: `cannot find object ${uuid}`,
-                listItem: ` * ${typeText} ${uuid} ${
-                  definition.variableName
-                }: **Can't read performance counters**\n`,
-                tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
+                listItem: ` * ${uuid}: **Can't read performance counters**\n`,
               }
             }
           })

@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
   _getMonitors () {
     return map(this._configuration.hostMonitors, def =>
       this._parseDefinition({ ...def, objectType: 'host' })
-    ).concat(
-      map(this._configuration.vmMonitors, def =>
-        this._parseDefinition({ ...def, objectType: 'vm' })
-      )
-    )
+    )
+      .concat(
+        map(this._configuration.vmMonitors, def =>
+          this._parseDefinition({ ...def, objectType: 'vm' })
+        )
+      )
+      .concat(
+        map(this._configuration.srMonitors, def =>
+          this._parseDefinition({ ...def, objectType: 'sr' })
+        )
+      )
   }
 
   async _checkMonitors () {
for (const entry of snapshot) {
|
||||
raiseOrLowerAlarm(
|
||||
`${monitor.alarmId}|${entry.uuid}|RRD`,
|
||||
!entry.couldFindRRD,
|
||||
entry.value === undefined,
|
||||
() => {
|
||||
this._sendAlertEmail(
|
||||
'Secondary Issue',
|
||||
@@ -477,9 +553,11 @@ ${entry.listItem}`
|
||||
},
|
||||
() => {}
|
||||
)
|
||||
if (!entry.couldFindRRD) {
|
||||
|
||||
if (entry.value === undefined) {
|
||||
continue
|
||||
}
|
||||
|
||||
const raiseAlarm = alarmId => {
|
||||
// sample XenCenter message:
|
||||
// value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>
|
||||
@@ -500,23 +578,24 @@ ${entry.listItem}`
|
||||
this._sendAlertEmail(
|
||||
'',
|
||||
`
|
||||
## ALERT ${monitor.title}
|
||||
## ALERT: ${monitor.title}
|
||||
${entry.listItem}
|
||||
### Description
|
||||
${monitor.vmFunction.description}`
|
||||
)
|
||||
}
|
||||
|
||||
const lowerAlarm = alarmId => {
|
||||
console.log('lowering Alarm', alarmId)
|
||||
this._sendAlertEmail(
|
||||
'END OF ALERT',
|
||||
`
|
||||
## END OF ALERT ${monitor.title}
|
||||
## END OF ALERT: ${monitor.title}
|
||||
${entry.listItem}
|
||||
### Description
|
||||
${monitor.vmFunction.description}`
|
||||
)
|
||||
}
|
||||
|
||||
raiseOrLowerAlarm(
|
||||
`${monitor.alarmId}|${entry.uuid}`,
|
||||
entry.shouldAlarm,
|
||||
|
||||
@@ -58,7 +58,8 @@ export const configurationSchema = {
     },
     port: {
       type: 'integer',
-      description: 'port of the SMTP server (defaults to 25 or 465 for TLS)',
+      description:
+        'port of the SMTP server (defaults to 25 or 465 for TLS)',
     },
     secure: {
       default: false,

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-usage-report",
-  "version": "0.3.2",
+  "version": "0.4.2",
   "license": "AGPL-3.0",
   "description": "",
   "keywords": [

@@ -34,7 +34,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.2",
+    "@xen-orchestra/cron": "^1.0.3",
     "babel-runtime": "^6.23.0",
     "handlebars": "^4.0.6",
     "html-minifier": "^3.5.8",
@@ -90,7 +90,7 @@
       .top table{
         margin: auto;
         margin-top: 20px;
-        width: 400px;
+        min-width: 30em;
       }
 
       .top table caption {

@@ -121,6 +121,10 @@
         border:1px solid #95a5a6;
         text-align: center;
       }
+
+      .allResources table {
+        min-width: 60em
+      }
     </style>
   </head>
   <body>
@@ -151,86 +155,34 @@
         </tr>
         <tr>
           <td>Number:</td>
-          <td>{{global.vms.number}}</td>
-          <td>
-            {{#if global.vmsEvolution.number}}
-              {{#compare global.vmsEvolution.number ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.number}}
-            {{else}}
-              0
-            {{/if}}
-          </td>
+          <td>{{global.vms.number}} {{normaliseEvolution global.vmsEvolution.number}}</td>
         </tr>
         <tr>
           <td>CPU:</td>
-          <td>{{global.vms.cpu}} %</td> <!-- One condition doesn't work -->
-          <td style='color:{{#compare global.vmsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.cpu}}
-              {{#compare global.vmsEvolution.cpu ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.cpu}}%
-            {{else}}
-              0
-            {{/if}}
-          </td>
+          <td>{{normaliseValue global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
         <tr>
         <tr>
           <td>RAM:</td>
-          <td>{{global.vms.ram}} GiB</td>
-          <td style='color:{{#compare global.vmsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.ram}}
-              {{#compare global.vmsEvolution.ram ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.ram}}%
-            {{else}}
-              0
-            {{/if}}
-          </td>
+          <td>{{normaliseValue global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
         <tr>
         <tr>
           <td>Disk read:</td>
-          <td>{{global.vms.diskRead}} MiB</td>
-          <td style='color:{{#compare global.vmsEvolution.diskRead ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.diskRead}}
-              {{#compare global.vmsEvolution.diskRead ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.diskRead}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>{{normaliseValue global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
           </td>
         <tr>
         <tr>
           <td>Disk write:</td>
-          <td>{{global.vms.diskWrite}} MiB</td>
-          <td style='color:{{#compare global.vmsEvolution.diskWrite ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.diskWrite}}
-              {{#compare global.vmsEvolution.diskWrite ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.diskWrite}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>{{normaliseValue global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
           </td>
         <tr>
         <tr>
-          <td>Net reception:</td>
-          <td>{{global.vms.netReception}} KiB</td>
-          <td style='color:{{#compare global.vmsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.netReception}}
-              {{#compare global.vmsEvolution.netReception ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.netReception}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>Network RX:</td>
+          <td>{{normaliseValue global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
           </td>
         <tr>
         <tr>
-          <td>Net transmission:</td>
-          <td>{{global.vms.netTransmission}} KiB</td>
-          <td style='color:{{#compare global.vmsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.vmsEvolution.netTransmission}}
-              {{#compare global.vmsEvolution.netTransmission ">" 0}}+{{/compare}}
-              {{global.vmsEvolution.netTransmission}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>Network TX:</td>
+          <td>{{normaliseValue global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
           </td>
         <tr>
       </table>

@@ -253,7 +205,7 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} %</td>
+          <td>{{normaliseValue this.value}} %</td>
         </tr>
       {{/each}}
 

@@ -264,7 +216,7 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} GiB</td>
+          <td>{{normaliseValue this.value}} GiB</td>
         </tr>
       {{/each}}
       <tr>

@@ -274,7 +226,7 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} MiB</td>
+          <td>{{normaliseValue this.value}} MiB</td>
         </tr>
       {{/each}}
       <tr>

@@ -284,27 +236,27 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} MiB</td>
+          <td>{{normaliseValue this.value}} MiB</td>
         </tr>
       {{/each}}
       <tr>
-        <td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
+        <td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
       </tr>
       {{#each topVms.netReception}}
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} KiB</td>
+          <td>{{normaliseValue this.value}} KiB</td>
         </tr>
       {{/each}}
       <tr>
-        <td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
+        <td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
      </tr>
      {{#each topVms.netTransmission}}
        <tr>
          <td>{{shortUUID this.uuid}}</td>
          <td>{{this.name}}</td>
-          <td>{{this.value}} KiB</td>
+          <td>{{normaliseValue this.value}} KiB</td>
        </tr>
      {{/each}}
    </table>

@@ -318,75 +270,33 @@
         </tr>
         <tr>
           <td>Number:</td>
-          <td>{{global.hosts.number}}</td>
-          <td>
-            {{#if global.hostsEvolution.number}}
-              {{#compare global.hostsEvolution.number ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.number}}
-            {{else}}
-              0
-            {{/if}}
+          <td>{{global.hosts.number}} {{normaliseEvolution global.hostsEvolution.number}}
           </td>
         </tr>
         <tr>
           <td>CPU:</td>
-          <td>{{global.hosts.cpu}} %</td>
-          <td style='color:{{#compare global.hostsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.hostsEvolution.cpu}}
-              {{#compare global.hostsEvolution.cpu ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.cpu}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>{{normaliseValue global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
           </td>
         <tr>
         <tr>
           <td>RAM:</td>
-          <td>{{global.hosts.ram}} GiB</td>
-          <td style='color:{{#compare global.hostsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.hostsEvolution.ram}}
-              {{#compare global.hostsEvolution.ram ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.ram}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>{{normaliseValue global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
           </td>
           </td>
         <tr>
         <tr>
           <td>Load average:</td>
-          <td>{{global.hosts.load}} </td>
-          <td style='color:{{#compare global.hostsEvolution.load ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.hostsEvolution.load}}
-              {{#compare global.hostsEvolution.load ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.load}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>{{normaliseValue global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
           </td>
         <tr>
         <tr>
-          <td>Net reception:</td>
-          <td>{{global.hosts.netReception}} KiB</td>
-          <td style='color:{{#compare global.hostsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.hostsEvolution.netReception}}
-              {{#compare global.hostsEvolution.netReception ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.netReception}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>Network RX:</td>
+          <td>{{normaliseValue global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
           </td>
         <tr>
         <tr>
-          <td>Net transmission:</td>
-          <td>{{global.hosts.netTransmission}} KiB</td>
-          <td style='color:{{#compare global.hostsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
-            {{#if global.hostsEvolution.netTransmission}}
-              {{#compare global.hostsEvolution.netTransmission ">" 0}}+{{/compare}}
-              {{global.hostsEvolution.netTransmission}}%
-            {{else}}
-              0
-            {{/if}}
+          <td>Network TX:</td>
+          <td>{{normaliseValue global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
           </td>
         <tr>
       </table>

@@ -408,7 +318,7 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} %</td>
+          <td>{{normaliseValue this.value}} %</td>
         </tr>
       {{/each}}
       <tr>

@@ -418,7 +328,7 @@
         <tr>
           <td>{{shortUUID this.uuid}}</td>
           <td>{{this.name}}</td>
-          <td>{{this.value}} GiB</td>
+          <td>{{normaliseValue this.value}} GiB</td>
         </tr>
       {{/each}}
<tr>
|
||||
@@ -428,27 +338,27 @@
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} </td>
|
||||
<td>{{normaliseValue this.value}} </td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
<td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
|
||||
<td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
|
||||
</tr>
|
||||
{{#each topHosts.netReception}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} KiB</td>
|
||||
<td>{{normaliseValue this.value}} KiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
<td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
|
||||
<td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
|
||||
</tr>
|
||||
{{#each topHosts.netTransmission}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} KiB</td>
|
||||
<td>{{normaliseValue this.value}} KiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
@@ -464,11 +374,11 @@
|
||||
<th>Name</th>
|
||||
<th>value</th>
|
||||
</tr>
|
||||
{{#each topAllocation}}
|
||||
{{#each topSrs}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.size}} GiB</td>
|
||||
<td>{{normaliseValue this.value}} GiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
@@ -533,8 +443,8 @@
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
</tr>
|
||||
{{#if vmsRessourcesEvolution.added}}
|
||||
{{#each vmsRessourcesEvolution.added}}
|
||||
{{#if vmsResourcesEvolution.added}}
|
||||
{{#each vmsResourcesEvolution.added}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
@@ -553,8 +463,8 @@
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
</tr>
|
||||
{{#if vmsRessourcesEvolution.removed}}
|
||||
{{#each vmsRessourcesEvolution.removed}}
|
||||
{{#if vmsResourcesEvolution.removed}}
|
||||
{{#each vmsResourcesEvolution.removed}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
@@ -572,8 +482,8 @@
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
</tr>
|
||||
{{#if hostsRessourcesEvolution.added}}
|
||||
{{#each hostsRessourcesEvolution.added}}
|
||||
{{#if hostsResourcesEvolution.added}}
|
||||
{{#each hostsResourcesEvolution.added}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
@@ -591,8 +501,8 @@
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
</tr>
|
||||
{{#if hostsRessourcesEvolution.removed}}
|
||||
{{#each hostsRessourcesEvolution.removed}}
|
||||
{{#if hostsResourcesEvolution.removed}}
|
||||
{{#each hostsResourcesEvolution.removed}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
@@ -606,5 +516,81 @@
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
{{#if allResources}}
|
||||
<div class="page">
|
||||
<div class="top allResources">
|
||||
<hr color="#95a5a6" size="1px"/>
|
||||
<h3 style="text-align: center;">All resources</h3>
|
||||
<hr color="#95a5a6" size="1px"/>
|
||||
<table>
|
||||
<caption>VMs</caption>
|
||||
<tr>
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
<th>CPU</th>
|
||||
<th>RAM (GiB)</th>
|
||||
<th>Disk read (MiB)</th>
|
||||
<th>Disk write (MiB)</th>
|
||||
<th>Network RX (KiB)</th>
|
||||
<th>Network TX (KiB)</th>
|
||||
</tr>
|
||||
{{#each allResources.vms}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
|
||||
<td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
|
||||
<td>{{normaliseValue this.diskRead}} {{normaliseEvolution this.evolution.diskRead}}</td>
|
||||
<td>{{normaliseValue this.diskWrite}} {{normaliseEvolution this.evolution.diskWrite}}</td>
|
||||
<td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
|
||||
<td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
<table>
|
||||
<caption>Hosts</caption>
|
||||
<tr>
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
<th>CPU</th>
|
||||
<th>RAM (GiB)</th>
|
||||
<th>Load average</th>
|
||||
<th>Network RX (KiB)</th>
|
||||
<th>Network TX (KiB)</th>
|
||||
</tr>
|
||||
{{#each allResources.hosts}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
|
||||
<td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
|
||||
<td>{{normaliseValue this.load}} {{normaliseEvolution this.evolution.load}}</td>
|
||||
<td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
|
||||
<td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
<table>
|
||||
<caption>SRs</caption>
|
||||
<tr>
|
||||
<th>UUID</th>
|
||||
<th>Name</th>
|
||||
<th>Total space (GiB)</th>
|
||||
<th>Used space (GiB)</th>
|
||||
<th>Free space (GiB)</th>
|
||||
</tr>
|
||||
{{#each allResources.srs}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{normaliseValue this.total}} {{normaliseEvolution this.evolution.total}}</td>
|
||||
<td>{{normaliseValue this.used}}</td>
|
||||
<td>{{normaliseValue this.free}}</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
{{/if}}
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -6,7 +6,9 @@ import {
|
||||
concat,
|
||||
differenceBy,
|
||||
filter,
|
||||
find,
|
||||
forEach,
|
||||
get,
|
||||
isFinite,
|
||||
map,
|
||||
orderBy,
|
||||
@@ -67,6 +69,10 @@ export const configurationSchema = {
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
all: {
|
||||
type: 'boolean',
|
||||
description: "It includes all resources' stats if on.",
|
||||
},
|
||||
periodicity: {
|
||||
type: 'string',
|
||||
enum: ['monthly', 'weekly'],
|
||||
@@ -88,12 +94,12 @@ Handlebars.registerHelper('compare', function (
|
||||
options
|
||||
) {
|
||||
if (arguments.length < 3) {
|
||||
throw new Error('Handlerbars Helper "compare" needs 2 parameters')
|
||||
throw new Error('Handlebars Helper "compare" needs 2 parameters')
|
||||
}
|
||||
|
||||
if (!compareOperators[operator]) {
|
||||
throw new Error(
|
||||
`Handlerbars Helper "compare" doesn't know the operator ${operator}`
|
||||
`Handlebars Helper "compare" doesn't know the operator ${operator}`
|
||||
)
|
||||
}
|
||||
|
||||
@@ -104,12 +110,12 @@ Handlebars.registerHelper('compare', function (
|
||||
|
||||
Handlebars.registerHelper('math', function (lvalue, operator, rvalue, options) {
|
||||
if (arguments.length < 3) {
|
||||
throw new Error('Handlerbars Helper "math" needs 2 parameters')
|
||||
throw new Error('Handlebars Helper "math" needs 2 parameters')
|
||||
}
|
||||
|
||||
if (!mathOperators[operator]) {
|
||||
throw new Error(
|
||||
`Handlerbars Helper "math" doesn't know the operator ${operator}`
|
||||
`Handlebars Helper "math" doesn't know the operator ${operator}`
|
||||
)
|
||||
}
|
||||
|
||||
@@ -122,6 +128,23 @@ Handlebars.registerHelper('shortUUID', uuid => {
|
||||
}
|
||||
})
|
||||
|
||||
Handlebars.registerHelper(
|
||||
'normaliseValue',
|
||||
value => (isFinite(value) ? round(value, 2) : '-')
|
||||
)
|
||||
|
||||
Handlebars.registerHelper(
|
||||
'normaliseEvolution',
|
||||
value =>
|
||||
new Handlebars.SafeString(
|
||||
isFinite(+value) && +value !== 0
|
||||
? (value = round(value, 2)) > 0
|
||||
? `(<b style="color: green;">▲ ${value}%</b>)`
|
||||
: `(<b style="color: red;">▼ ${String(value).slice(1)}%</b>)`
|
||||
: ''
|
||||
)
|
||||
)
|
||||
|
||||
// ===================================================================
|
||||
|
||||
function computeMean (values) {
|
||||
@@ -137,12 +160,12 @@ function computeMean (values) {
|
||||
return sum / n
|
||||
}
|
||||
|
||||
const computeDoubleMean = val => computeMean(val.map(computeMean))
|
||||
const computeDoubleMean = val => computeMean(map(val, computeMean))
|
||||
|
||||
function computeMeans (objects, options) {
|
||||
return zipObject(
|
||||
options,
|
||||
map(options, opt => round(computeMean(map(objects, opt)), 2))
|
||||
map(options, opt => computeMean(map(objects, opt)), 2)
|
||||
)
|
||||
}
|
||||
|
||||
@@ -163,67 +186,107 @@ function getTop (objects, options) {
|
||||
obj => ({
|
||||
uuid: obj.uuid,
|
||||
name: obj.name,
|
||||
value: round(obj[opt], 2),
|
||||
value: obj[opt],
|
||||
})
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
function conputePercentage (curr, prev, options) {
|
||||
function computePercentage (curr, prev, options) {
|
||||
return zipObject(
|
||||
options,
|
||||
map(
|
||||
options,
|
||||
opt =>
|
||||
prev[opt] === 0
|
||||
prev[opt] === 0 || prev[opt] === null
|
||||
? 'NONE'
|
||||
: `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
|
||||
: `${(curr[opt] - prev[opt]) * 100 / prev[opt]}`
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
function getDiff (oldElements, newElements) {
|
||||
return {
|
||||
added: differenceBy(oldElements, newElements, 'uuid'),
|
||||
removed: differenceBy(newElements, oldElements, 'uuid'),
|
||||
added: differenceBy(newElements, oldElements, 'uuid'),
|
||||
removed: differenceBy(oldElements, newElements, 'uuid'),
|
||||
}
|
||||
}
|
||||
|
||||
function getMemoryUsedMetric ({ memory, memoryFree = memory }) {
|
||||
return map(memory, (value, key) => value - memoryFree[key])
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
function getVmsStats ({ runningVms, xo }) {
|
||||
return Promise.all(
|
||||
map(runningVms, async vm => {
|
||||
const vmStats = await xo.getXapiVmStats(vm, 'days')
|
||||
return {
|
||||
uuid: vm.uuid,
|
||||
name: vm.name_label,
|
||||
cpu: computeDoubleMean(vmStats.stats.cpus),
|
||||
ram: computeMean(vmStats.stats.memoryUsed) / gibPower,
|
||||
diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
|
||||
diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
|
||||
netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
|
||||
netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
|
||||
}
|
||||
})
|
||||
async function getVmsStats ({ runningVms, xo }) {
|
||||
return orderBy(
|
||||
await Promise.all(
|
||||
map(runningVms, async vm => {
|
||||
const vmStats = await xo.getXapiVmStats(vm, 'days')
|
||||
return {
|
||||
uuid: vm.uuid,
|
||||
name: vm.name_label,
|
||||
cpu: computeDoubleMean(vmStats.stats.cpus),
|
||||
ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
|
||||
diskRead:
|
||||
computeDoubleMean(values(get(vmStats.stats.xvds, 'r'))) / mibPower,
|
||||
diskWrite:
|
||||
computeDoubleMean(values(get(vmStats.stats.xvds, 'w'))) / mibPower,
|
||||
netReception:
|
||||
computeDoubleMean(get(vmStats.stats.vifs, 'rx')) / kibPower,
|
||||
netTransmission:
|
||||
computeDoubleMean(get(vmStats.stats.vifs, 'tx')) / kibPower,
|
||||
}
|
||||
})
|
||||
),
|
||||
'name',
|
||||
'asc'
|
||||
)
|
||||
}
|
||||
|
||||
function getHostsStats ({ runningHosts, xo }) {
|
||||
return Promise.all(
|
||||
map(runningHosts, async host => {
|
||||
const hostStats = await xo.getXapiHostStats(host, 'days')
|
||||
return {
|
||||
uuid: host.uuid,
|
||||
name: host.name_label,
|
||||
cpu: computeDoubleMean(hostStats.stats.cpus),
|
||||
ram: computeMean(hostStats.stats.memoryUsed) / gibPower,
|
||||
load: computeMean(hostStats.stats.load),
|
||||
netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
|
||||
netTransmission: computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
|
||||
async function getHostsStats ({ runningHosts, xo }) {
|
||||
return orderBy(
|
||||
await Promise.all(
|
||||
map(runningHosts, async host => {
|
||||
const hostStats = await xo.getXapiHostStats(host, 'days')
|
||||
return {
|
||||
uuid: host.uuid,
|
||||
name: host.name_label,
|
||||
cpu: computeDoubleMean(hostStats.stats.cpus),
|
||||
ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
|
||||
load: computeMean(hostStats.stats.load),
|
||||
netReception:
|
||||
computeDoubleMean(get(hostStats.stats.pifs, 'rx')) / kibPower,
|
||||
netTransmission:
|
||||
computeDoubleMean(get(hostStats.stats.pifs, 'tx')) / kibPower,
|
||||
}
|
||||
})
|
||||
),
|
||||
'name',
|
||||
'asc'
|
||||
)
|
||||
}
|
||||
|
||||
function getSrsStats (xoObjects) {
|
||||
return orderBy(
|
||||
map(filter(xoObjects, obj => obj.type === 'SR' && obj.size > 0), sr => {
|
||||
const total = sr.size / gibPower
|
||||
const used = sr.physical_usage / gibPower
|
||||
let name = sr.name_label
|
||||
if (!sr.shared) {
|
||||
name += ` (${find(xoObjects, { id: sr.$container }).name_label})`
|
||||
}
|
||||
})
|
||||
return {
|
||||
uuid: sr.uuid,
|
||||
name,
|
||||
total,
|
||||
used,
|
||||
free: total - used,
|
||||
}
|
||||
}),
|
||||
'total',
|
||||
'desc'
|
||||
)
|
||||
}
|
||||
|
||||
@@ -303,20 +366,21 @@ function getTopHosts ({ hostsStats, xo }) {
|
||||
])
|
||||
}
|
||||
|
||||
function getMostAllocatedSpaces ({ disks, xo }) {
|
||||
return map(orderBy(disks, ['size'], ['desc']).slice(0, 3), disk => ({
|
||||
uuid: disk.uuid,
|
||||
name: disk.name_label,
|
||||
size: round(disk.size / gibPower, 2),
|
||||
}))
|
||||
function getTopSrs ({ srsStats, xo }) {
|
||||
return getTop(srsStats, ['total']).total
|
||||
}
|
||||
|
||||
async function getHostsMissingPatches ({ runningHosts, xo }) {
|
||||
const hostsMissingPatches = await Promise.all(
|
||||
map(runningHosts, async host => {
|
||||
const hostsPatches = await xo
|
||||
let hostsPatches = await xo
|
||||
.getXapi(host)
|
||||
.listMissingPoolPatchesOnHost(host._xapiId)
|
||||
|
||||
if (host.license_params.sku_type === 'free') {
|
||||
hostsPatches = filter(hostsPatches, { paid: false })
|
||||
}
|
||||
|
||||
if (hostsPatches.length > 0) {
|
||||
return {
|
||||
uuid: host.uuid,
|
||||
@@ -347,46 +411,75 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {
|
||||
|
||||
const prevDate = oldStats.style.currDate
|
||||
|
||||
const vmsEvolution = {
|
||||
number: newStatsVms.number - oldStatsVms.number,
|
||||
...conputePercentage(newStatsVms, oldStatsVms, [
|
||||
const resourcesOptions = {
|
||||
vms: [
|
||||
'cpu',
|
||||
'ram',
|
||||
'diskRead',
|
||||
'diskWrite',
|
||||
'netReception',
|
||||
'netTransmission',
|
||||
]),
|
||||
],
|
||||
hosts: ['cpu', 'ram', 'load', 'netReception', 'netTransmission'],
|
||||
srs: ['total'],
|
||||
}
|
||||
|
||||
const vmsEvolution = {
|
||||
number: newStatsVms.number - oldStatsVms.number,
|
||||
...computePercentage(newStatsVms, oldStatsVms, resourcesOptions.vms),
|
||||
}
|
||||
|
||||
const hostsEvolution = {
|
||||
number: newStatsHosts.number - oldStatsHosts.number,
|
||||
...conputePercentage(newStatsHosts, oldStatsHosts, [
|
||||
'cpu',
|
||||
'ram',
|
||||
'load',
|
||||
'netReception',
|
||||
'netTransmission',
|
||||
]),
|
||||
...computePercentage(
|
||||
newStatsHosts,
|
||||
oldStatsHosts,
|
||||
resourcesOptions.hosts
|
||||
),
|
||||
}
|
||||
|
||||
const vmsRessourcesEvolution = getDiff(
|
||||
const vmsResourcesEvolution = getDiff(
|
||||
oldStatsVms.allVms,
|
||||
newStatsVms.allVms
|
||||
)
|
||||
const hostsRessourcesEvolution = getDiff(
|
||||
const hostsResourcesEvolution = getDiff(
|
||||
oldStatsHosts.allHosts,
|
||||
newStatsHosts.allHosts
|
||||
)
|
||||
|
||||
const usersEvolution = getDiff(oldStats.users, newStats.users)
|
||||
|
||||
const newAllResourcesStats = newStats.allResources
|
||||
const oldAllResourcesStats = oldStats.allResources
|
||||
|
||||
// adding for each resource its evolution
|
||||
if (
|
||||
newAllResourcesStats !== undefined &&
|
||||
oldAllResourcesStats !== undefined
|
||||
) {
|
||||
forEach(newAllResourcesStats, (resource, key) => {
|
||||
const option = resourcesOptions[key]
|
||||
|
||||
if (option !== undefined) {
|
||||
forEach(resource, newItem => {
|
||||
const oldItem = find(oldAllResourcesStats[key], {
|
||||
uuid: newItem.uuid,
|
||||
})
|
||||
|
||||
if (oldItem !== undefined) {
|
||||
newItem.evolution = computePercentage(newItem, oldItem, option)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return {
|
||||
vmsEvolution,
|
||||
hostsEvolution,
|
||||
prevDate,
|
||||
vmsRessourcesEvolution,
|
||||
hostsRessourcesEvolution,
|
||||
vmsResourcesEvolution,
|
||||
hostsResourcesEvolution,
|
||||
usersEvolution,
|
||||
}
|
||||
} catch (err) {
|
||||
@@ -394,7 +487,7 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {
|
||||
}
|
||||
}
|
||||
|
||||
async function dataBuilder ({ xo, storedStatsPath }) {
|
||||
async function dataBuilder ({ xo, storedStatsPath, all }) {
|
||||
const xoObjects = values(xo.getObjects())
|
||||
const runningVms = filter(xoObjects, { type: 'VM', power_state: 'Running' })
|
||||
const haltedVms = filter(xoObjects, { type: 'VM', power_state: 'Halted' })
|
||||
@@ -403,18 +496,17 @@ async function dataBuilder ({ xo, storedStatsPath }) {
|
||||
power_state: 'Running',
|
||||
})
|
||||
const haltedHosts = filter(xoObjects, { type: 'host', power_state: 'Halted' })
|
||||
const disks = filter(xoObjects, { type: 'SR' })
|
||||
const [
|
||||
users,
|
||||
vmsStats,
|
||||
hostsStats,
|
||||
topAllocation,
|
||||
srsStats,
|
||||
hostsMissingPatches,
|
||||
] = await Promise.all([
|
||||
xo.getAllUsers(),
|
||||
getVmsStats({ xo, runningVms }),
|
||||
getHostsStats({ xo, runningHosts }),
|
||||
getMostAllocatedSpaces({ xo, disks }),
|
||||
getSrsStats(xoObjects),
|
||||
getHostsMissingPatches({ xo, runningHosts }),
|
||||
])
|
||||
|
||||
@@ -423,35 +515,50 @@ async function dataBuilder ({ xo, storedStatsPath }) {
|
||||
globalHostsStats,
|
||||
topVms,
|
||||
topHosts,
|
||||
topSrs,
|
||||
usersEmail,
|
||||
] = await Promise.all([
|
||||
computeGlobalVmsStats({ xo, vmsStats, haltedVms }),
|
||||
computeGlobalHostsStats({ xo, hostsStats, haltedHosts }),
|
||||
getTopVms({ xo, vmsStats }),
|
||||
getTopHosts({ xo, hostsStats }),
|
||||
getTopSrs({ xo, srsStats }),
|
||||
getAllUsersEmail(users),
|
||||
])
|
||||
|
||||
let allResources
|
||||
if (all) {
|
||||
allResources = {
|
||||
vms: vmsStats,
|
||||
hosts: hostsStats,
|
||||
srs: srsStats,
|
||||
date: currDate,
|
||||
}
|
||||
}
|
||||
|
||||
const evolution = await computeEvolution({
|
||||
allResources,
|
||||
storedStatsPath,
|
||||
hosts: globalHostsStats,
|
||||
usersEmail,
|
||||
vms: globalVmsStats,
|
||||
})
|
||||
|
||||
const data = {
|
||||
return {
|
||||
allResources,
|
||||
global: {
|
||||
vms: globalVmsStats,
|
||||
hosts: globalHostsStats,
|
||||
vmsEvolution: evolution && evolution.vmsEvolution,
|
||||
hostsEvolution: evolution && evolution.hostsEvolution,
|
||||
},
|
||||
topVms,
|
||||
topHosts,
|
||||
topSrs,
|
||||
topVms,
|
||||
hostsMissingPatches,
|
||||
usersEmail,
|
||||
topAllocation,
|
||||
vmsRessourcesEvolution: evolution && evolution.vmsRessourcesEvolution,
|
||||
hostsRessourcesEvolution: evolution && evolution.hostsRessourcesEvolution,
|
||||
vmsResourcesEvolution: evolution && evolution.vmsResourcesEvolution,
|
||||
hostsResourcesEvolution: evolution && evolution.hostsResourcesEvolution,
|
||||
usersEvolution: evolution && evolution.usersEvolution,
|
||||
style: {
|
||||
imgXo,
|
||||
@@ -460,8 +567,6 @@ async function dataBuilder ({ xo, storedStatsPath }) {
|
||||
page: '{{page}}',
|
||||
},
|
||||
}
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
@@ -472,6 +577,10 @@ class UsageReportPlugin {
|
||||
this._dir = getDataDir
|
||||
// Defined in configure().
|
||||
this._conf = null
|
||||
this._xo.addApiMethod(
|
||||
'plugin.usageReport.send',
|
||||
this._sendReport.bind(this, false)
|
||||
)
|
||||
}
|
||||
|
||||
configure (configuration, state) {
|
||||
@@ -485,7 +594,7 @@ class UsageReportPlugin {
|
||||
configuration.periodicity === 'monthly' ? '00 06 1 * *' : '00 06 * * 0'
|
||||
).createJob(async () => {
|
||||
try {
|
||||
await this._sendReport()
|
||||
await this._sendReport(true)
|
||||
} catch (error) {
|
||||
console.error(
|
||||
'[WARN] scheduled function:',
|
||||
@@ -511,13 +620,14 @@ class UsageReportPlugin {
|
||||
}
|
||||
|
||||
test () {
|
||||
return this._sendReport()
|
||||
return this._sendReport(true)
|
||||
}
|
||||
|
||||
async _sendReport () {
|
||||
async _sendReport (storeData) {
|
||||
const data = await dataBuilder({
|
||||
xo: this._xo,
|
||||
storedStatsPath: this._storedStatsPath,
|
||||
all: this._conf.all,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
@@ -537,10 +647,11 @@ class UsageReportPlugin {
|
||||
},
|
||||
],
|
||||
}),
|
||||
storeStats({
|
||||
data,
|
||||
storedStatsPath: this._storedStatsPath,
|
||||
}),
|
||||
storeData &&
|
||||
storeStats({
|
||||
data,
|
||||
storedStatsPath: this._storedStatsPath,
|
||||
}),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,12 +8,14 @@ try {
|
||||
const filtered = frames.filter(function (frame) {
|
||||
const name = frame && frame.getFileName()
|
||||
|
||||
return (// has a filename
|
||||
return (
|
||||
// has a filename
|
||||
name &&
|
||||
// contains a separator (no internal modules)
|
||||
name.indexOf(sep) !== -1 &&
|
||||
// does not start with `internal`
|
||||
name.lastIndexOf('internal', 0) !== -1)
|
||||
name.lastIndexOf('internal', 0) !== -1
|
||||
)
|
||||
})
|
||||
|
||||
// depd (used amongst other by express requires at least 3 frames
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "xo-server",
|
||||
"version": "5.17.3",
|
||||
"version": "5.19.9",
|
||||
"license": "AGPL-3.0",
|
||||
"description": "Server part of Xen-Orchestra",
|
||||
"keywords": [
|
||||
@@ -31,14 +31,15 @@
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"@babel/polyfill": "7.0.0-beta.40",
|
||||
"@babel/polyfill": "7.0.0-beta.49",
|
||||
"@marsaud/smb2-promise": "^0.2.1",
|
||||
"@nraynaud/struct-fu": "^1.0.1",
|
||||
"@xen-orchestra/cron": "^1.0.2",
|
||||
"@xen-orchestra/cron": "^1.0.3",
|
||||
"@xen-orchestra/fs": "^0.0.1",
|
||||
"ajv": "^6.1.1",
|
||||
"app-conf": "^0.5.0",
|
||||
"archiver": "^2.1.0",
|
||||
"base64url": "^2.0.0",
|
||||
"async-iterator-to-stream": "^1.0.1",
|
||||
"base64url": "^3.0.0",
|
||||
"bind-property-descriptor": "^1.0.0",
|
||||
"blocked": "^1.2.1",
|
||||
"bluebird": "^3.5.1",
|
||||
@@ -53,28 +54,30 @@
|
||||
"escape-string-regexp": "^1.0.5",
|
||||
"event-to-promise": "^0.8.0",
|
||||
"exec-promise": "^0.7.0",
|
||||
"execa": "^0.9.0",
|
||||
"execa": "^0.10.0",
|
||||
"express": "^4.16.2",
|
||||
"express-session": "^1.15.6",
|
||||
"fatfs": "^0.10.4",
|
||||
"from2": "^2.3.0",
|
||||
"fs-extra": "^5.0.0",
|
||||
"fs-extra": "^6.0.1",
|
||||
"get-stream": "^3.0.0",
|
||||
"golike-defer": "^0.4.1",
|
||||
"hashy": "^0.6.2",
|
||||
"hashy": "^0.7.1",
|
||||
"helmet": "^3.9.0",
|
||||
"highland": "^2.11.1",
|
||||
"http-proxy": "^1.16.2",
|
||||
"http-request-plus": "^0.5.0",
|
||||
"http-server-plus": "^0.8.0",
|
||||
"http-server-plus": "^0.10.0",
|
||||
"human-format": "^0.10.0",
|
||||
"is-redirect": "^1.0.0",
|
||||
"jest-worker": "^23.0.0",
|
||||
"js-yaml": "^3.10.0",
|
||||
"json-rpc-peer": "^0.15.3",
|
||||
"json5": "^0.5.1",
|
||||
"json5": "^1.0.0",
|
||||
"julien-f-source-map-support": "0.1.0",
|
||||
"julien-f-unzip": "^0.2.1",
|
||||
"kindof": "^2.0.0",
|
||||
"level": "^3.0.0",
|
||||
"level": "^4.0.0",
|
||||
"level-party": "^3.0.4",
|
||||
"level-sublevel": "^6.6.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
@@ -90,43 +93,47 @@
|
||||
"partial-stream": "0.0.0",
|
||||
"passport": "^0.4.0",
|
||||
"passport-local": "^1.0.0",
|
||||
"pretty-format": "^22.0.3",
|
||||
"pretty-format": "^23.0.0",
|
||||
"promise-toolbox": "^0.9.5",
|
||||
"proxy-agent": "^2.1.0",
|
||||
"proxy-agent": "^3.0.0",
|
||||
"pug": "^2.0.0-rc.4",
|
||||
"pw": "^0.0.4",
|
||||
"redis": "^2.8.0",
|
||||
"schema-inspector": "^1.6.8",
|
||||
"semver": "^5.4.1",
|
||||
"serve-static": "^1.13.1",
|
||||
"split-lines": "^1.1.0",
|
||||
"split-lines": "^2.0.0",
|
||||
"stack-chain": "^2.0.0",
|
||||
"stoppable": "^1.0.5",
|
||||
"struct-fu": "^1.2.0",
|
||||
"tar-stream": "^1.5.5",
|
||||
"through2": "^2.0.3",
|
||||
"tmp": "^0.0.33",
|
||||
"uuid": "^3.0.1",
|
||||
"value-matcher": "^0.2.0",
|
||||
"vhd-lib": "^0.1.1",
|
||||
"ws": "^5.0.0",
|
||||
"xen-api": "^0.16.6",
|
||||
"xen-api": "^0.16.9",
|
||||
"xml2js": "^0.4.19",
|
||||
"xo-acl-resolver": "^0.2.3",
|
||||
"xo-acl-resolver": "^0.2.4",
|
||||
"xo-collection": "^0.4.1",
|
||||
"xo-common": "^0.1.1",
|
||||
"xo-remote-parser": "^0.3",
|
||||
"xo-vmdk-to-vhd": "0.0.12"
|
||||
"xo-vmdk-to-vhd": "^0.1.2",
|
||||
"yazl": "^2.4.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "7.0.0-beta.40",
|
||||
"@babel/core": "7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-decorators": "7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-function-bind": "7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.40",
|
||||
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.40",
|
||||
"@babel/preset-env": "7.0.0-beta.40",
|
||||
"@babel/preset-flow": "7.0.0-beta.40",
|
||||
"@babel/cli": "7.0.0-beta.49",
|
||||
"@babel/core": "7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-decorators": "7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-function-bind": "7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.49",
|
||||
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.49",
|
||||
"@babel/preset-env": "7.0.0-beta.49",
|
||||
"@babel/preset-flow": "7.0.0-beta.49",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^5.1.3",
|
||||
"index-modules": "^0.3.0",
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
import { basename } from 'path'
|
||||
import { isEmpty, pickBy } from 'lodash'
|
||||
|
||||
import { safeDateFormat } from '../utils'
|
||||
|
||||
export function createJob ({ schedules, ...job }) {
|
||||
job.userId = this.user.id
|
||||
return this.createBackupNgJob(job, schedules)
|
||||
@@ -27,11 +32,25 @@ createJob.params = {
|
||||
settings: {
|
||||
type: 'object',
|
||||
},
|
||||
srs: {
|
||||
type: 'object',
|
||||
optional: true,
|
||||
},
|
||||
vms: {
|
||||
type: 'object',
|
||||
},
|
||||
}
|
||||
|
||||
export function migrateLegacyJob ({ id }) {
|
||||
return this.migrateLegacyBackupJob(id)
|
||||
}
|
||||
migrateLegacyJob.permission = 'admin'
|
||||
migrateLegacyJob.params = {
|
||||
id: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
|
||||
export function deleteJob ({ id }) {
|
||||
return this.deleteBackupNgJob(id)
|
||||
}
|
||||
@@ -71,6 +90,10 @@ editJob.params = {
|
||||
type: 'object',
|
||||
optional: true,
|
||||
},
|
||||
srs: {
|
||||
type: 'object',
|
||||
optional: true,
|
||||
},
|
||||
vms: {
|
||||
type: 'object',
|
||||
optional: true,
|
||||
@@ -95,8 +118,8 @@ getJob.params = {
|
||||
},
|
||||
}
|
||||
|
||||
export async function runJob ({ id, schedule }) {
|
||||
return this.runJobSequence([id], await this.getSchedule(schedule))
|
||||
export async function runJob ({ id, schedule, vm }) {
|
||||
return this.runJobSequence([id], await this.getSchedule(schedule), vm)
|
||||
}
|
||||
|
||||
runJob.permission = 'admin'
|
||||
@@ -108,10 +131,23 @@ runJob.params = {
|
||||
schedule: {
|
||||
type: 'string',
|
||||
},
|
||||
vm: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
export async function getAllLogs (filter) {
|
||||
const logs = await this.getBackupNgLogs()
|
||||
return isEmpty(filter) ? logs : pickBy(logs, filter)
|
||||
}
|
||||
|
||||
getAllLogs.permission = 'admin'
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
export function deleteVmBackup ({ id }) {
|
||||
return this.deleteVmBackupNg(id)
|
||||
}
|
||||
@@ -153,3 +189,88 @@ importVmBackup.params = {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
export function listPartitions ({ remote, disk }) {
|
||||
return this.listBackupNgDiskPartitions(remote, disk)
|
||||
}
|
||||
|
||||
listPartitions.permission = 'admin'
|
||||
|
||||
listPartitions.params = {
|
||||
disk: {
|
||||
type: 'string',
|
||||
},
|
||||
remote: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
|
||||
export function listFiles ({ remote, disk, partition, path }) {
|
||||
return this.listBackupNgPartitionFiles(remote, disk, partition, path)
|
||||
}
|
||||
|
||||
listFiles.permission = 'admin'
|
||||
|
||||
listFiles.params = {
|
||||
disk: {
|
||||
type: 'string',
|
||||
},
|
||||
partition: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
},
|
||||
path: {
|
||||
type: 'string',
|
||||
},
|
||||
remote: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
|
||||
async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
|
||||
const zipStream = await this.fetchBackupNgPartitionFiles(
|
||||
remote,
|
||||
disk,
|
||||
partition,
|
||||
paths
|
||||
)
|
||||
|
||||
res.setHeader('content-disposition', 'attachment')
|
||||
res.setHeader('content-type', 'application/octet-stream')
|
||||
return zipStream
|
||||
}
|
||||
|
||||
export async function fetchFiles (params) {
|
||||
const { paths } = params
|
||||
let filename = `restore_${safeDateFormat(new Date())}`
|
||||
if (paths.length === 1) {
|
||||
filename += `_${basename(paths[0])}`
|
||||
}
|
||||
filename += '.zip'
|
||||
|
||||
return this.registerHttpRequest(handleFetchFiles, params, {
|
||||
suffix: encodeURI(`/${filename}`),
|
||||
}).then(url => ({ $getFrom: url }))
|
||||
}
|
||||
|
||||
fetchFiles.permission = 'admin'
|
||||
|
||||
fetchFiles.params = {
|
||||
disk: {
|
||||
type: 'string',
|
||||
},
|
||||
partition: {
|
||||
optional: true,
|
||||
type: 'string',
|
||||
},
|
||||
paths: {
|
||||
items: { type: 'string' },
|
||||
minLength: 1,
|
||||
type: 'array',
|
||||
},
|
||||
remote: {
|
||||
type: 'string',
|
||||
},
|
||||
}
|
||||
|
||||
@@ -76,6 +76,21 @@ export { restartAgent as restart_agent } // eslint-disable-line camelcase
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function setRemoteSyslogHost ({ host, syslogDestination }) {
|
||||
return this.getXapi(host).setRemoteSyslogHost(host._xapiId, syslogDestination)
|
||||
}
|
||||
|
||||
setRemoteSyslogHost.params = {
|
||||
id: { type: 'string' },
|
||||
syslogDestination: { type: 'string' },
|
||||
}
|
||||
|
||||
setRemoteSyslogHost.resolve = {
|
||||
host: ['id', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function start ({ host }) {
|
||||
return this.getXapi(host).powerOnHost(host._xapiId)
|
||||
}
|
||||
@@ -242,7 +257,7 @@ emergencyShutdownHost.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function stats ({ host, granularity }) {
|
||||
return this.getXapiHostStats(host, granularity)
|
||||
return this.getXapiHostStats(host._xapiId, granularity)
|
||||
}
|
||||
|
||||
stats.description = 'returns statistic of the host'
|
||||
|
||||
@@ -1,19 +1,5 @@
|
||||
export async function get ({ namespace }) {
|
||||
const logger = await this.getLogger(namespace)
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const logs = {}
|
||||
|
||||
logger
|
||||
.createReadStream()
|
||||
.on('data', data => {
|
||||
logs[data.key] = data.value
|
||||
})
|
||||
.on('end', () => {
|
||||
resolve(logs)
|
||||
})
|
||||
.on('error', reject)
|
||||
})
|
||||
export function get ({ namespace }) {
|
||||
return this.getLogs(namespace)
|
||||
}
|
||||
|
||||
get.description = 'returns logs list for one namespace'
|
||||
|
||||
@@ -189,6 +189,7 @@ export async function createNfs ({
|
||||
server,
|
||||
serverPath,
|
||||
nfsVersion,
|
||||
nfsOptions,
|
||||
}) {
|
||||
const xapi = this.getXapi(host)
|
||||
|
||||
@@ -202,6 +203,11 @@ export async function createNfs ({
|
||||
deviceConfig.nfsversion = nfsVersion
|
||||
}
|
||||
|
||||
// if NFS options given
|
||||
if (nfsOptions) {
|
||||
deviceConfig.options = nfsOptions
|
||||
}
|
||||
|
||||
const srRef = await xapi.call(
|
||||
'SR.create',
|
||||
host._xapiRef,
|
||||
@@ -226,6 +232,7 @@ createNfs.params = {
|
||||
server: { type: 'string' },
|
||||
serverPath: { type: 'string' },
|
||||
nfsVersion: { type: 'string', optional: true },
|
||||
nfsOptions: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
createNfs.resolve = {
|
||||
@@ -241,7 +248,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
|
||||
const xapi = this.getXapi(host)
|
||||
|
||||
const deviceConfig = {
|
||||
scsiId,
|
||||
SCSIid: scsiId,
|
||||
}
|
||||
|
||||
const srRef = await xapi.call(
|
||||
@@ -251,7 +258,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
|
||||
'0',
|
||||
nameLabel,
|
||||
nameDescription,
|
||||
'lvmoohba', // SR LVM over HBA
|
||||
'lvmohba', // SR LVM over HBA
|
||||
'user', // recommended by Citrix
|
||||
true,
|
||||
{}
|
||||
@@ -366,7 +373,7 @@ export async function probeHba ({ host }) {
|
||||
let xml
|
||||
|
||||
try {
|
||||
await xapi.call('SR.probe', host._xapiRef, 'type', {})
|
||||
await xapi.call('SR.probe', host._xapiRef, {}, 'lvmohba', {})
|
||||
|
||||
throw new Error('the call above should have thrown an error')
|
||||
} catch (error) {
|
||||
@@ -382,7 +389,7 @@ export async function probeHba ({ host }) {
|
||||
hbaDevices.push({
|
||||
hba: hbaDevice.hba.trim(),
|
||||
path: hbaDevice.path.trim(),
|
||||
scsciId: hbaDevice.SCSIid.trim(),
|
||||
scsiId: hbaDevice.SCSIid.trim(),
|
||||
size: hbaDevice.size.trim(),
|
||||
vendor: hbaDevice.vendor.trim(),
|
||||
})
|
||||
@@ -487,8 +494,8 @@ export async function probeIscsiIqns ({
|
||||
|
||||
// if we give user and password
|
||||
if (chapUser && chapPassword) {
|
||||
deviceConfig.chapUser = chapUser
|
||||
deviceConfig.chapPassword = chapPassword
|
||||
deviceConfig.chapuser = chapUser
|
||||
deviceConfig.chappassword = chapPassword
|
||||
}
|
||||
|
||||
// if we give another port than default iSCSI
|
||||
@@ -668,6 +675,34 @@ probeIscsiExists.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect if this HBA already exists in XAPI
|
||||
// It returns a table of SR UUID, empty if no existing connections
|
||||
|
||||
export async function probeHbaExists ({ host, scsiId }) {
|
||||
const xapi = this.getXapi(host)
|
||||
|
||||
const deviceConfig = {
|
||||
SCSIid: scsiId,
|
||||
}
|
||||
|
||||
const xml = parseXml(
|
||||
await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'lvmohba', {})
|
||||
)
|
||||
|
||||
// get the UUID of SR connected to this LUN
|
||||
return ensureArray(xml.SRlist.SR).map(sr => ({ uuid: sr.UUID.trim() }))
|
||||
}
|
||||
|
||||
probeHbaExists.params = {
|
||||
host: { type: 'string' },
|
||||
scsiId: { type: 'string' },
|
||||
}
|
||||
|
||||
probeHbaExists.resolve = {
|
||||
host: ['host', 'host', 'administrate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// This function helps to detect if this NFS SR already exists in XAPI
|
||||
// It returns a table of SR UUID, empty if no existing connections
|
||||
@@ -803,3 +838,23 @@ getUnhealthyVdiChainsLength.params = {
|
||||
getUnhealthyVdiChainsLength.resolve = {
|
||||
sr: ['id', 'SR', 'operate'],
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function stats ({ sr, granularity }) {
|
||||
return this.getXapiSrStats(sr._xapiId, granularity)
|
||||
}
|
||||
|
||||
stats.description = 'returns statistic of the sr'
|
||||
|
||||
stats.params = {
|
||||
id: { type: 'string' },
|
||||
granularity: {
|
||||
type: 'string',
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
stats.resolve = {
|
||||
sr: ['id', 'SR', 'view'],
|
||||
}
|
||||
|
||||
@@ -12,6 +12,10 @@ import { forEach, map, mapFilter, parseSize } from '../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export function getHaValues () {
|
||||
return ['best-effort', 'restart', '']
|
||||
}
|
||||
|
||||
function checkPermissionOnSrs (vm, permission = 'operate') {
|
||||
const permissions = []
|
||||
forEach(vm.$VBDs, vbdId => {
|
||||
@@ -46,11 +50,16 @@ const extract = (obj, prop) => {
|
||||
export async function create (params) {
|
||||
const { user } = this
|
||||
const resourceSet = extract(params, 'resourceSet')
|
||||
if (resourceSet === undefined && user.permission !== 'admin') {
|
||||
const template = extract(params, 'template')
|
||||
if (
|
||||
resourceSet === undefined &&
|
||||
!(await this.hasPermissions(this.user.id, [
|
||||
[template.$pool, 'administrate'],
|
||||
]))
|
||||
) {
|
||||
throw unauthorized()
|
||||
}
|
||||
|
||||
const template = extract(params, 'template')
|
||||
params.template = template._xapiId
|
||||
|
||||
const xapi = this.getXapi(template)
|
||||
@@ -151,16 +160,17 @@ export async function create (params) {
|
||||
await Promise.all([
|
||||
params.share
|
||||
? Promise.all(
|
||||
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
|
||||
this.addAcl(subjectId, vm.id, 'admin')
|
||||
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
|
||||
this.addAcl(subjectId, vm.id, 'admin')
|
||||
)
|
||||
)
|
||||
)
|
||||
: this.addAcl(user.id, vm.id, 'admin'),
|
||||
xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet),
|
||||
])
|
||||
}
|
||||
|
||||
for (const vif of xapiVm.$VIFs) {
|
||||
xapi.xo.addObject(vif)
|
||||
await this.allocIpAddresses(
|
||||
vif.$id,
|
||||
concat(vif.ipv4_allowed, vif.ipv6_allowed)
|
||||
@@ -322,6 +332,7 @@ create.resolve = {
|
||||
async function delete_ ({
|
||||
delete_disks, // eslint-disable-line camelcase
|
||||
force,
|
||||
forceDeleteDefaultTemplate,
|
||||
vm,
|
||||
|
||||
deleteDisks = delete_disks,
|
||||
@@ -362,7 +373,12 @@ async function delete_ ({
|
||||
;this.setVmResourceSet(vm._xapiId, null)::ignoreErrors()
|
||||
}
|
||||
|
||||
return xapi.deleteVm(vm._xapiId, deleteDisks, force)
|
||||
return xapi.deleteVm(
|
||||
vm._xapiId,
|
||||
deleteDisks,
|
||||
force,
|
||||
forceDeleteDefaultTemplate
|
||||
)
|
||||
}
|
||||
|
||||
delete_.params = {
|
||||
@@ -377,6 +393,11 @@ delete_.params = {
|
||||
optional: true,
|
||||
type: 'boolean',
|
||||
},
|
||||
|
||||
forceDeleteDefaultTemplate: {
|
||||
optional: true,
|
||||
type: 'boolean',
|
||||
},
|
||||
}
|
||||
delete_.resolve = {
|
||||
vm: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate'],
|
||||
@@ -451,7 +472,7 @@ export async function migrate ({
|
||||
})
|
||||
}
|
||||
|
||||
if (!await this.hasPermissions(this.session.get('user_id'), permissions)) {
|
||||
if (!(await this.hasPermissions(this.session.get('user_id'), permissions))) {
|
||||
throw unauthorized()
|
||||
}
|
||||
|
||||
@@ -544,11 +565,11 @@ set.params = {
|
||||
|
||||
name_description: { type: 'string', optional: true },
|
||||
|
||||
// TODO: provides better filtering of values for HA possible values: "best-
|
||||
// effort" meaning "try to restart this VM if possible but don't consider the
|
||||
// Pool to be overcommitted if this is not possible"; "restart" meaning "this
|
||||
// VM should be restarted"; "" meaning "do not try to restart this VM"
|
||||
high_availability: { type: 'boolean', optional: true },
|
||||
high_availability: {
|
||||
optional: true,
|
||||
pattern: new RegExp(`^(${getHaValues().join('|')})$`),
|
||||
type: 'string',
|
||||
},
|
||||
|
||||
// Number of virtual CPUs to allocate.
|
||||
CPUs: { type: 'integer', optional: true },
|
||||
@@ -585,6 +606,9 @@ set.params = {
|
||||
|
||||
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
|
||||
|
||||
// Emulate HVM C000 PCI device for Windows Update to fetch or update PV drivers
|
||||
hasVendorDevice: { type: 'boolean', optional: true },
|
||||
|
||||
// Move the vm In to/Out of Self Service
|
||||
resourceSet: { type: ['string', 'null'], optional: true },
|
||||
|
||||
@@ -688,9 +712,9 @@ copy.resolve = {
|
||||
export async function convertToTemplate ({ vm }) {
|
||||
// Convert to a template requires pool admin permission.
|
||||
if (
|
||||
!await this.hasPermissions(this.session.get('user_id'), [
|
||||
!(await this.hasPermissions(this.session.get('user_id'), [
|
||||
[vm.$pool, 'administrate'],
|
||||
])
|
||||
]))
|
||||
) {
|
||||
throw unauthorized()
|
||||
}
|
||||
@@ -1250,7 +1274,9 @@ export async function createInterface ({
|
||||
await this.checkResourceSetConstraints(resourceSet, this.user.id, [
|
||||
network.id,
|
||||
])
|
||||
} else if (!await this.hasPermissions(this.user.id, [[network.id, 'view']])) {
|
||||
} else if (
|
||||
!(await this.hasPermissions(this.user.id, [[network.id, 'view']]))
|
||||
) {
|
||||
throw unauthorized()
|
||||
}
|
||||
|
||||
@@ -1339,7 +1365,7 @@ detachPci.resolve = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function stats ({ vm, granularity }) {
|
||||
return this.getXapiVmStats(vm, granularity)
|
||||
return this.getXapiVmStats(vm._xapiId, granularity)
|
||||
}
|
||||
|
||||
stats.description = 'returns statistics about the VM'
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import getStream from 'get-stream'
|
||||
import { forEach } from 'lodash'
|
||||
|
||||
import { streamToBuffer } from '../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export function clean () {
|
||||
@@ -42,7 +41,9 @@ function handleGetAllObjects (req, res, { filter, limit }) {
|
||||
|
||||
export function getAllObjects ({ filter, limit, ndjson = false }) {
|
||||
return ndjson
|
||||
? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then($getFrom => ({ $getFrom }))
|
||||
? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then(
|
||||
$getFrom => ({ $getFrom })
|
||||
)
|
||||
: this.getObjects({ filter, limit })
|
||||
}
|
||||
|
||||
@@ -59,7 +60,7 @@ getAllObjects.params = {
|
||||
export async function importConfig () {
|
||||
return {
|
||||
$sendTo: await this.registerHttpRequest(async (req, res) => {
|
||||
await this.importConfig(JSON.parse(await streamToBuffer(req)))
|
||||
await this.importConfig(JSON.parse(await getStream.buffer(req)))
|
||||
|
||||
res.end('config successfully imported')
|
||||
}),
|
||||
|
||||
@@ -665,7 +665,9 @@ export const createSR = defer(async function (
|
||||
|
||||
CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 0 }
|
||||
|
||||
const tmpBoundObjectId = srs.join(',')
|
||||
const tmpBoundObjectId = `tmp_${srs.join(',')}_${Math.random()
|
||||
.toString(32)
|
||||
.slice(2)}`
|
||||
const license = await this.createBoundXosanTrialLicense({
|
||||
boundObjectId: tmpBoundObjectId,
|
||||
})
|
||||
|
||||
@@ -8,7 +8,7 @@ describe('debounce()', () => {
|
||||
let i
|
||||
|
||||
class Foo {
|
||||
@debounce(1e1)
|
||||
@debounce(10)
|
||||
foo () {
|
||||
++i
|
||||
}
|
||||
@@ -18,22 +18,28 @@ describe('debounce()', () => {
|
||||
i = 0
|
||||
})
|
||||
|
||||
it('works', done => {
|
||||
const foo = new Foo()
|
||||
it('works', () => {
|
||||
const savedNow = Date.now
|
||||
try {
|
||||
const now = Date.now()
|
||||
const mockDate = jest.fn()
|
||||
Date.now = mockDate
|
||||
const foo = new Foo()
|
||||
expect(i).toBe(0)
|
||||
|
||||
expect(i).toBe(0)
|
||||
mockDate.mockReturnValueOnce(now)
|
||||
foo.foo()
|
||||
expect(i).toBe(1)
|
||||
|
||||
foo.foo()
|
||||
expect(i).toBe(1)
|
||||
mockDate.mockReturnValueOnce(now + 2)
|
||||
foo.foo()
|
||||
expect(i).toBe(1)
|
||||
|
||||
foo.foo()
|
||||
expect(i).toBe(1)
|
||||
|
||||
setTimeout(() => {
|
||||
mockDate.mockReturnValueOnce(now + 2 + 10)
|
||||
foo.foo()
|
||||
expect(i).toBe(2)
|
||||
|
||||
done()
|
||||
}, 2e1)
|
||||
} finally {
|
||||
Date.now = savedNow
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@@ -7,8 +7,10 @@ import has from 'lodash/has'
|
||||
import helmet from 'helmet'
|
||||
import includes from 'lodash/includes'
|
||||
import proxyConsole from './proxy-console'
|
||||
import pw from 'pw'
|
||||
import serveStatic from 'serve-static'
|
||||
import startsWith from 'lodash/startsWith'
|
||||
import stoppable from 'stoppable'
|
||||
import WebSocket from 'ws'
|
||||
import { compile as compilePug } from 'pug'
|
||||
import { createServer as createProxyServer } from 'http-proxy'
|
||||
@@ -226,12 +228,12 @@ async function registerPlugin (pluginPath, pluginName) {
|
||||
// instance.
|
||||
const instance = isFunction(factory)
|
||||
? factory({
|
||||
xo: this,
|
||||
getDataDir: () => {
|
||||
const dir = `${this._config.datadir}/${pluginName}`
|
||||
return ensureDir(dir).then(() => dir)
|
||||
},
|
||||
})
|
||||
xo: this,
|
||||
getDataDir: () => {
|
||||
const dir = `${this._config.datadir}/${pluginName}`
|
||||
return ensureDir(dir).then(() => dir)
|
||||
},
|
||||
})
|
||||
: factory
|
||||
|
||||
await this.registerPlugin(
|
||||
@@ -310,6 +312,13 @@ async function makeWebServerListen (
|
||||
) {
|
||||
if (cert && key) {
|
||||
;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
|
||||
if (opts.key.includes('ENCRYPTED')) {
|
||||
opts.passphrase = await new Promise(resolve => {
|
||||
console.log('Encrypted key %s', key)
|
||||
process.stdout.write(`Enter pass phrase: `)
|
||||
pw(resolve)
|
||||
})
|
||||
}
|
||||
}
|
||||
try {
|
||||
const niceAddress = await webServer.listen(opts)
|
||||
@@ -332,7 +341,7 @@ async function makeWebServerListen (
|
||||
}
|
||||
|
||||
async function createWebServer ({ listen, listenOptions }) {
|
||||
const webServer = new WebServer()
|
||||
const webServer = stoppable(new WebServer())
|
||||
|
||||
await Promise.all(
|
||||
mapToArray(listen, opts =>
|
||||
@@ -498,7 +507,7 @@ const setUpConsoleProxy = (webServer, xo) => {
|
||||
const { token } = parseCookies(req.headers.cookie)
|
||||
|
||||
const user = await xo.authenticateUser({ token })
|
||||
if (!await xo.hasPermissions(user.id, [[id, 'operate']])) {
|
||||
if (!(await xo.hasPermissions(user.id, [[id, 'operate']]))) {
|
||||
throw invalidCredentials()
|
||||
}
|
||||
|
||||
@@ -537,9 +546,14 @@ export default async function main (args) {
|
||||
|
||||
{
|
||||
const debug = createLogger('xo:perf')
|
||||
blocked(ms => {
|
||||
debug('blocked for %sms', ms | 0)
|
||||
})
|
||||
blocked(
|
||||
ms => {
|
||||
debug('blocked for %sms', ms | 0)
|
||||
},
|
||||
{
|
||||
threshold: 500,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
const config = await loadConfiguration()
|
||||
@@ -565,7 +579,7 @@ export default async function main (args) {
|
||||
const xo = new Xo(config)
|
||||
|
||||
// Register web server close on XO stop.
|
||||
xo.on('stop', () => pFromCallback(cb => webServer.close(cb)))
|
||||
xo.on('stop', () => pFromCallback(cb => webServer.stop(cb)))
|
||||
|
||||
// Connects to all registered servers.
|
||||
await xo.start()
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
import execa from 'execa'
|
||||
import splitLines from 'split-lines'
|
||||
import { createParser } from 'parse-pairs'
|
||||
import { isArray, map } from 'lodash'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const parse = createParser({
|
||||
keyTransform: key => key.slice(5).toLowerCase(),
|
||||
})
|
||||
const makeFunction = command => (fields, ...args) =>
|
||||
execa
|
||||
.stdout(command, [
|
||||
const makeFunction = command => async (fields, ...args) => {
|
||||
return splitLines(
|
||||
await execa.stdout(command, [
|
||||
'--noheading',
|
||||
'--nosuffix',
|
||||
'--nameprefixes',
|
||||
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
|
||||
String(fields),
|
||||
...args,
|
||||
])
|
||||
.then(stdout =>
|
||||
map(
|
||||
splitLines(stdout),
|
||||
isArray(fields)
|
||||
? parse
|
||||
: line => {
|
||||
const data = parse(line)
|
||||
return data[fields]
|
||||
}
|
||||
)
|
||||
)
|
||||
).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
|
||||
}
|
||||
|
||||
export const lvs = makeFunction('lvs')
|
||||
export const pvs = makeFunction('pvs')
|
||||
|
||||
@@ -13,9 +13,18 @@ export default {
|
||||
type: 'string',
|
||||
description: 'identifier of this job',
|
||||
},
|
||||
scheduleId: {
|
||||
type: 'string',
|
||||
description: 'identifier of the schedule which ran the job',
|
||||
},
|
||||
key: {
|
||||
type: 'string',
|
||||
},
|
||||
type: {
|
||||
default: 'call',
|
||||
enum: ['backup', 'call'],
|
||||
},
|
||||
data: {},
|
||||
},
|
||||
required: ['event', 'userId', 'jobId', 'key'],
|
||||
required: ['event', 'userId', 'jobId'],
|
||||
}
|
||||
|
||||
18
packages/xo-server/src/schemas/log/taskEnd.js
Normal file
18
packages/xo-server/src/schemas/log/taskEnd.js
Normal file
@@ -0,0 +1,18 @@
|
||||
export default {
|
||||
$schema: 'http://json-schema.org/draft-04/schema#',
|
||||
type: 'object',
|
||||
properties: {
|
||||
event: {
|
||||
enum: ['task.end'],
|
||||
},
|
||||
taskId: {
|
||||
type: 'string',
|
||||
description: 'identifier of this task',
|
||||
},
|
||||
status: {
|
||||
enum: ['canceled', 'failure', 'success'],
|
||||
},
|
||||
result: {},
|
||||
},
|
||||
required: ['event', 'taskId', 'status'],
|
||||
}
|
||||
15
packages/xo-server/src/schemas/log/taskStart.js
Normal file
15
packages/xo-server/src/schemas/log/taskStart.js
Normal file
@@ -0,0 +1,15 @@
|
||||
export default {
|
||||
$schema: 'http://json-schema.org/draft-04/schema#',
|
||||
type: 'object',
|
||||
properties: {
|
||||
event: {
|
||||
enum: ['task.start'],
|
||||
},
|
||||
parentId: {
|
||||
type: 'string',
|
||||
description: 'identifier of the parent task or job',
|
||||
},
|
||||
data: {},
|
||||
},
|
||||
required: ['event'],
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
const streamToNewBuffer = stream =>
|
||||
new Promise((resolve, reject) => {
|
||||
const chunks = []
|
||||
let length = 0
|
||||
|
||||
const onData = chunk => {
|
||||
chunks.push(chunk)
|
||||
length += chunk.length
|
||||
}
|
||||
stream.on('data', onData)
|
||||
|
||||
const clean = () => {
|
||||
stream.removeListener('data', onData)
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
}
|
||||
const onEnd = () => {
|
||||
resolve(Buffer.concat(chunks, length))
|
||||
clean()
|
||||
}
|
||||
stream.on('end', onEnd)
|
||||
const onError = error => {
|
||||
reject(error)
|
||||
clean()
|
||||
}
|
||||
stream.on('error', onError)
|
||||
})
|
||||
export { streamToNewBuffer as default }
|
||||
@@ -13,7 +13,7 @@ import pick from 'lodash/pick'
|
||||
import tmp from 'tmp'
|
||||
import xml2js from 'xml2js'
|
||||
import { randomBytes } from 'crypto'
|
||||
import { resolve } from 'path'
|
||||
import { dirname, resolve } from 'path'
|
||||
import { utcFormat, utcParse } from 'd3-time-format'
|
||||
import {
|
||||
all as pAll,
|
||||
@@ -55,10 +55,6 @@ export const asyncMap = (collection, iteratee) => {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export streamToBuffer from './stream-to-new-buffer'
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function camelToSnakeCase (string) {
|
||||
return string.replace(
|
||||
/([a-z0-9])([A-Z])/g,
|
||||
@@ -319,6 +315,12 @@ export const popProperty = obj => {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// resolve a relative path from a file
|
||||
export const resolveRelativeFromFile = (file, path) =>
|
||||
resolve('/', dirname(file), path).slice(1)
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// Format a date in ISO 8601 in a safe way to be used in filenames
|
||||
// (even on Windows).
|
||||
export const safeDateFormat = utcFormat('%Y%m%dT%H%M%SZ')

@@ -15,6 +15,8 @@ declare export function asyncMap<K, V1, V2>(

declare export function getPseudoRandomBytes(n: number): Buffer

declare export function resolveRelativeFromFile(file: string, path: string): string

declare export function safeDateFormat(timestamp: number): string

declare export function serializeError(error: Error): Object

@@ -1,768 +0,0 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'
import fu from '@nraynaud/struct-fu'
import isEqual from 'lodash/isEqual'
import { fromEvent } from 'promise-toolbox'

import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, streamToBuffer } from './utils'

const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-util]${str}`) : noop

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
const VHD_SECTOR_SIZE = 512

// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4

const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0

// Types of backup treated. Others are not supported.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.

// Other.
const BLOCK_UNUSED = 0xffffffff
const BIT_MASK = 0x80

// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)

// ===================================================================

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  fu.struct('dataOffset', [
    fu.uint32('high'), // 16
    fu.uint32('low'), // 20
  ]),
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  fu.struct('originalSize', [
    // At the creation, current size of the hard disk.
    fu.uint32('high'), // 40
    fu.uint32('low'), // 44
  ]),
  fu.struct('currentSize', [
    // Current size of the virtual disk. At the creation: currentSize = originalSize.
    fu.uint32('high'), // 48
    fu.uint32('low'), // 52
  ]),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85
  fu.char('reserved', 426), // 86
])

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
  fu.struct('tableOffset', [
    // Absolute byte offset of the Block Allocation Table.
    fu.uint32('high'),
    fu.uint32('low'),
  ]),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.uint32('reserved1'),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      fu.struct('platformDataOffset', [
        // Absolute byte offset of the locator data.
        fu.uint32('high'),
        fu.uint32('low'),
      ]),
    ],
    VHD_PARENT_LOCATOR_ENTRIES
  ),
  fu.char('reserved2', 256),
])

// ===================================================================
// Helpers
// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)

// Sectors conversions.
const sectorsRoundUp = bytes =>
  Math.floor((bytes + VHD_SECTOR_SIZE - 1) / VHD_SECTOR_SIZE)
const sectorsRoundUpNoZero = bytes => sectorsRoundUp(bytes) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
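
// Editor's worked example (not in the diff): a default 2 MiB block holds
// 2097152 / 512 = 4096 sectors; its bitmap needs one bit per sector, i.e.
// 4096 / 8 = 512 bytes, which sectorsRoundUpNoZero rounds up to 1 sector
// (sectorsToBytes(1) = 512 bytes).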

// Check/Set a bit on a vhd map.
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
const mapSetBit = (map, bit) => {
  map[bit >> 3] |= BIT_MASK >> (bit & 7)
}
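
// Editor's sketch: bit 10 lives in byte 10 >> 3 = 1, at in-byte position
// 10 & 7 = 2, so mapSetBit(map, 10) ORs byte 1 with 0x80 >> 2 = 0x20, and
// mapTestBit reads it back by shifting the byte left until that bit lands
// on 0x80.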

const packField = (field, value, buf) => {
  const { offset } = field

  field.pack(
    value,
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}

const unpackField = (field, buf) => {
  const { offset } = field

  return field.unpack(
    buf,
    typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
  )
}
// ===================================================================

// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (rawStruct, struct) {
  const checksumField = struct.fields.checksum

  let sum = 0

  // Reset current sum.
  packField(checksumField, 0, rawStruct)

  for (let i = 0, n = struct.size; i < n; i++) {
    sum = (sum + rawStruct[i]) & 0xffffffff
  }

  sum = 0xffffffff - sum

  // Write new sum.
  packField(checksumField, sum, rawStruct)

  return sum
}
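
// Editor's note (sketch): this is the one's-complement checksum from the VHD
// spec — sum every byte of the struct with the checksum field zeroed, then
// invert. E.g. if the bytes sum to 0x1234, the stored checksum is
// 0xffffffff - 0x1234 = 0xffffedcb.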

// ===================================================================

export class Vhd {
  constructor (handler, path) {
    this._handler = handler
    this._path = path
  }

  // =================================================================
  // Read functions.
  // =================================================================

  _readStream (start, n) {
    return this._handler.createReadStream(this._fd ? this._fd : this._path, {
      start,
      end: start + n - 1, // end is inclusive
    })
  }

  _read (start, n) {
    return this._readStream(start, n).then(streamToBuffer)
  }

  // Returns the first address after metadata. (In bytes)
  getEndOfHeaders () {
    const { header } = this

    let end = uint32ToUint64(this.footer.dataOffset) + VHD_HEADER_SIZE

    const blockAllocationTableSize = sectorsToBytes(
      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
    )

    // Max(end, block allocation table end)
    end = Math.max(
      end,
      uint32ToUint64(header.tableOffset) + blockAllocationTableSize
    )

    for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
      const entry = header.parentLocatorEntry[i]

      if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
        end = Math.max(
          end,
          uint32ToUint64(entry.platformDataOffset) +
            sectorsToBytes(entry.platformDataSpace)
        )
      }
    }

    debug(`End of headers: ${end}.`)

    return end
  }

  // Returns the first sector after data.
  getEndOfData () {
    let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)

    const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
    const { maxTableEntries } = this.header
    for (let i = 0; i < maxTableEntries; i++) {
      const blockAddr = this._getBatEntry(i)

      if (blockAddr !== BLOCK_UNUSED) {
        end = Math.max(end, blockAddr + fullBlockSize)
      }
    }

    debug(`End of data: ${end}.`)

    return sectorsToBytes(end)
  }

  // Get the beginning (footer + header) of a vhd file.
  async readHeaderAndFooter () {
    const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)

    const sum = unpackField(fuFooter.fields.checksum, buf)
    const sumToTest = checksumStruct(buf, fuFooter)

    // Checksum child & parent.
    if (sumToTest !== sum) {
      throw new Error(
        `Bad checksum in vhd. Expected: ${sum}. Given: ${sumToTest}. (data=${buf.toString(
          'hex'
        )})`
      )
    }

    const header = (this.header = fuHeader.unpack(buf.slice(VHD_FOOTER_SIZE)))
    this.footer = fuFooter.unpack(buf)

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
    const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
      header.blockSize / VHD_SECTOR_SIZE
    ))

    // Compute bitmap size in sectors.
    // Default: 1.
    const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
      sectorsPerBlock >> 3
    ))

    // Full block size => data block size + bitmap size.
    this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)

    // In bytes.
    // Default: 512.
    this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
  }

  // Check if a vhd object has a block allocation table.
  hasBlockAllocationTableMap () {
    return this.footer.fileFormatVersion > getVhdVersion(1, 0)
  }

  // Returns a buffer that contains the block allocation table of a vhd file.
  async readBlockTable () {
    const { header } = this

    const offset = uint32ToUint64(header.tableOffset)
    const size = sectorsToBytes(
      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
    )

    this.blockTable = await this._read(offset, size)
  }

  // return the first sector (bitmap) of a block
  _getBatEntry (block) {
    return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
  }

  _readBlock (blockId, onlyBitmap = false) {
    const blockAddr = this._getBatEntry(blockId)
    if (blockAddr === BLOCK_UNUSED) {
      throw new Error(`no such block ${blockId}`)
    }

    return this._read(
      sectorsToBytes(blockAddr),
      onlyBitmap ? this.bitmapSize : this.fullBlockSize
    ).then(
      buf =>
        onlyBitmap
          ? { id: blockId, bitmap: buf }
          : {
              id: blockId,
              bitmap: buf.slice(0, this.bitmapSize),
              data: buf.slice(this.bitmapSize),
              buffer: buf,
            }
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  //
  _getFirstAndLastBlocks () {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
    let i = 0
    let j = 0
    let first, firstSector, last, lastSector

    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += VHD_ENTRY_SIZE

      if (i === n) {
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
    first = last = i

    while (i < n) {
      const sector = bat.readUInt32BE(j)
      if (sector !== BLOCK_UNUSED) {
        if (sector < firstSector) {
          first = i
          firstSector = sector
        } else if (sector > lastSector) {
          last = i
          lastSector = sector
        }
      }

      i += 1
      j += VHD_ENTRY_SIZE
    }

    return { first, firstSector, last, lastSector }
  }

  // =================================================================
  // Write functions.
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  async _write (data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    const stream = await this._handler.createOutputStream(
      this._fd ? this._fd : this._path,
      {
        flags: 'r+',
        start: offset,
      }
    )
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
  }

  async ensureBatSize (size) {
    const { header } = this

    const prevMaxTableEntries = header.maxTableEntries
    if (prevMaxTableEntries >= size) {
      return
    }

    const tableOffset = uint32ToUint64(header.tableOffset)
    // extend BAT
    const maxTableEntries = (header.maxTableEntries = size)
    const batSize = sectorsToBytes(
      sectorsRoundUpNoZero(maxTableEntries * VHD_ENTRY_SIZE)
    )
    const prevBat = this.blockTable
    const bat = (this.blockTable = Buffer.allocUnsafe(batSize))
    prevBat.copy(bat)
    bat.fill(BUF_BLOCK_UNUSED, prevBat.length)
    debug(
      `ensureBatSize: extend in memory BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
    )

    const extendBat = async () => {
      debug(
        `ensureBatSize: extend in file BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
      )

      return this._write(
        constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
        tableOffset + prevBat.length
      )
    }
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      if (tableOffset + batSize < sectorsToBytes(firstSector)) {
        return Promise.all([extendBat(), this.writeHeader()])
      }

      const { fullBlockSize } = this
      const newFirstSector = lastSector + fullBlockSize / VHD_SECTOR_SIZE
      debug(
        `ensureBatSize: move first block ${firstSector} -> ${newFirstSector}`
      )

      // copy the first block at the end
      const stream = await this._readStream(
        sectorsToBytes(firstSector),
        fullBlockSize
      )
      await this._write(stream, sectorsToBytes(newFirstSector))
      await extendBat()
      await this._setBatEntry(first, newFirstSector)
      await this.writeHeader()
      await this.writeFooter()
    } catch (e) {
      if (e.noBlock) {
        await extendBat()
        await this.writeHeader()
        await this.writeFooter()
      } else {
        throw e
      }
    }
  }

  // set the first sector (bitmap) of a block
  _setBatEntry (block, blockSector) {
    const i = block * VHD_ENTRY_SIZE
    const { blockTable } = this

    blockTable.writeUInt32BE(blockSector, i)

    return this._write(
      blockTable.slice(i, i + VHD_ENTRY_SIZE),
      uint32ToUint64(this.header.tableOffset) + i
    )
  }

  // Make a new empty block at vhd end.
  // Update block allocation table in context and in file.
  async createBlock (blockId) {
    const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)

    debug(`create block ${blockId} at ${blockAddr}`)

    await Promise.all([
      // Write an empty block and addr in vhd file.
      this._write(
        constantStream([0], this.fullBlockSize),
        sectorsToBytes(blockAddr)
      ),

      this._setBatEntry(blockId, blockAddr),
    ])

    return blockAddr
  }

  // Write a bitmap at a block address.
  async writeBlockBitmap (blockAddr, bitmap) {
    const { bitmapSize } = this

    if (bitmap.length !== bitmapSize) {
      throw new Error(`Bitmap length is not correct ! ${bitmap.length}`)
    }

    const offset = sectorsToBytes(blockAddr)

    debug(
      `Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
        'hex'
      )})`
    )
    await this._write(bitmap, sectorsToBytes(blockAddr))
  }

  async writeEntireBlock (block) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }
    await this._write(block.buffer, sectorsToBytes(blockAddr))
  }

  async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }

    const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

    debug(
      `writeBlockSectors at ${offset} block=${
        block.id
      }, sectors=${beginSectorId}...${endSectorId}`
    )

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(parentBitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, parentBitmap)
    await this._write(
      block.data.slice(
        sectorsToBytes(beginSectorId),
        sectorsToBytes(endSectorId)
      ),
      sectorsToBytes(offset)
    )
  }

  async coalesceBlock (child, blockId) {
    const block = await child._readBlock(blockId)
    const { bitmap, data } = block

    debug(`coalesceBlock block=${blockId}`)

    // For each sector of block data...
    const { sectorsPerBlock } = child
    for (let i = 0; i < sectorsPerBlock; i++) {
      // If no changes on one sector, skip.
      if (!mapTestBit(bitmap, i)) {
        continue
      }
      let parentBitmap = null
      let endSector = i + 1

      // Count changed sectors.
      while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
        ++endSector
      }

      // Write n sectors into parent.
      debug(`coalesceBlock: write sectors=${i}...${endSector}`)

      const isFullBlock = i === 0 && endSector === sectorsPerBlock
      if (isFullBlock) {
        await this.writeEntireBlock(block)
      } else {
        if (parentBitmap === null) {
          parentBitmap = (await this._readBlock(blockId, true)).bitmap
        }
        await this.writeBlockSectors(block, i, endSector, parentBitmap)
      }

      i = endSector
    }

    // Return the merged data size
    return data.length
  }

  // Write a context footer. (At the end and beginning of a vhd file.)
  async writeFooter () {
    const { footer } = this

    const offset = this.getEndOfData()
    const rawFooter = fuFooter.pack(footer)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(
      `Write footer at: ${offset} (checksum=${
        footer.checksum
      }). (data=${rawFooter.toString('hex')})`
    )

    await this._write(rawFooter, 0)
    await this._write(rawFooter, offset)
  }

  writeHeader () {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    const offset = VHD_FOOTER_SIZE
    debug(
      `Write header at: ${offset} (checksum=${
        header.checksum
      }). (data=${rawHeader.toString('hex')})`
    )
    return this._write(rawHeader, offset)
  }
}

// Merge vhd child into vhd parent.
//
// Child must be a delta backup !
// Parent must be a full backup !
//
// TODO: update the identifier of the parent VHD.
export default concurrency(2)(async function vhdMerge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  parentVhd._fd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const childVhd = new Vhd(childHandler, childPath)
    childVhd._fd = await childHandler.openFile(childPath, 'r')
    try {
      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      // Child must be a delta.
      if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
        throw new Error('Unable to merge, child is not a delta backup.')
      }

      // Allocation table map is not yet implemented.
      if (
        parentVhd.hasBlockAllocationTableMap() ||
        childVhd.hasBlockAllocationTableMap()
      ) {
        throw new Error('Unsupported allocation table map.')
      }

      // Read allocation table of child/parent.
      await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])

      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd._getBatEntry(blockId) !== BLOCK_UNUSED) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = { ...cFooter.currentSize }
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = { ...cFooter.originalSize }
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childVhd._fd)
    }
  } finally {
    await parentHandler.closeFile(parentVhd._fd)
  }
})

// returns true if the child was actually modified
export async function chainVhd (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)
  await Promise.all([
    parentVhd.readHeaderAndFooter(),
    childVhd.readHeaderAndFooter(),
  ])

  const { header } = childVhd

  const parentName = parentPath.split('/').pop()
  const parentUuid = parentVhd.footer.uuid
  if (
    header.parentUnicodeName !== parentName ||
    !isEqual(header.parentUuid, parentUuid)
  ) {
    header.parentUuid = parentUuid
    header.parentUnicodeName = parentName
    await childVhd.writeHeader()
    return true
  }

  // The checksum was broken between xo-server v5.2.4 and v5.2.5
  //
  // Replace by a correct checksum if necessary.
  //
  // TODO: remove when enough time has passed (6 months).
  {
    const rawHeader = fuHeader.pack(header)
    const checksum = checksumStruct(rawHeader, fuHeader)
    if (checksum !== header.checksum) {
      await childVhd._write(rawHeader, VHD_FOOTER_SIZE)
      return true
    }
  }

  return false
}
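
// Editor's usage sketch (hypothetical paths, not part of this file): chain a
// delta onto its parent, then merge it back in.
//
//   await chainVhd(handler, 'full.vhd', handler, 'delta.vhd')
//   const bytes = await vhdMerge(handler, 'full.vhd', handler, 'delta.vhd')
//   console.log(`${bytes} bytes of delta data merged into the parent`)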

export async function readVhdMetadata (handler: RemoteHandler, path: string) {
  const vhd = new Vhd(handler, path)
  await vhd.readHeaderAndFooter()
  return {
    footer: vhd.footer,
    header: vhd.header,
  }
}

@@ -1,72 +0,0 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'

async function testVhdMerge () {
  console.log('before merge')
  const moOfRandom = 4
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom}M < /dev/urandom >randomfile`,
  ])
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
  ])
  await execa('qemu-img', [
    'convert',
    '-f',
    'raw',
    '-Ovpc',
    'randomfile',
    'randomfile.vhd',
  ])
  await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
  await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
  // await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])

  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
  const childVhd = new Vhd(handler, 'randomfile.vhd')
  console.log('changing type')
  await childVhd.readHeaderAndFooter()
  console.log('child vhd', childVhd.footer.currentSize, originalSize)
  await childVhd.readBlockTable()
  childVhd.footer.diskType = 4 // Delta backup.
  await childVhd.writeFooter()
  console.log('chained')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  console.log('merged')
  const parentVhd = new Vhd(handler, 'empty.vhd')
  await parentVhd.readHeaderAndFooter()
  console.log('parent vhd', parentVhd.footer.currentSize)

  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-Oraw',
    'empty.vhd',
    'recovered',
  ])
  await execa('truncate', ['-s', originalSize, 'recovered'])
  console.log('ls', (await execa('ls', ['-lt'])).stdout)
  console.log(
    'diff',
    (await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
  )

  /* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  await vhd.ensureBatSize(300)

  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  */
  console.log(await handler.list())
  console.log('lol')
}

export { testVhdMerge as default }
@@ -146,6 +146,7 @@ const TRANSFORMS = {
      license_params: obj.license_params,
      license_server: obj.license_server,
      license_expiry: toTimestamp(obj.license_params.expiry),
      logging: obj.logging,
      name_description: obj.name_description,
      name_label: obj.name_label,
      memory: (function () {
@@ -186,9 +187,14 @@ const TRANSFORMS = {
        }
      }),
      agentStartTime: toTimestamp(otherConfig.agent_start_time),
      rebootRequired: !isEmpty(obj.updates_requiring_reboot),
      rebootRequired:
        softwareVersion.product_brand === 'XCP-ng'
          ? toTimestamp(otherConfig.boot_time) <
            +otherConfig.rpm_patch_installation_time
          : !isEmpty(obj.updates_requiring_reboot),
      tags: obj.tags,
      version: softwareVersion.product_version,
      productBrand: softwareVersion.product_brand,

      // TODO: dedupe.
      PIFs: link(obj, 'PIFs'),
@@ -227,18 +233,20 @@ const TRANSFORMS = {
          return
        }

        if (!guestMetrics) {
        if (guestMetrics === undefined) {
          return false
        }

        const { major, minor } = guestMetrics.PV_drivers_version
        const [hostMajor, hostMinor] = (
          obj.$resident_on || obj.$pool.$master
        ).software_version.product_version.split('.')

        return major >= hostMajor && minor >= hostMinor
          ? 'up to date'
          : 'out of date'
        if (major === undefined || minor === undefined) {
          return false
        }

        return {
          major,
          minor,
        }
      })()

      let resourceSet = otherConfig['xo:resource_set']
@@ -294,8 +302,7 @@ const TRANSFORMS = {
        }
      })(),

      // TODO: there are two possible values: "best-effort" and "restart"
      high_availability: Boolean(obj.ha_restart_priority),
      high_availability: obj.ha_restart_priority,

      memory: (function () {
        const dynamicMin = +obj.memory_dynamic_min
@@ -329,6 +336,7 @@ const TRANSFORMS = {
      other: otherConfig,
      os_version: (guestMetrics && guestMetrics.os_version) || null,
      power_state: obj.power_state,
      hasVendorDevice: obj.has_vendor_device,
      resourceSet,
      snapshots: link(obj, 'snapshots'),
      startTime: metrics && toTimestamp(metrics.start_time),
@@ -512,9 +520,7 @@ const TRANSFORMS = {
      vdi.type += '-snapshot'
      vdi.snapshot_time = toTimestamp(obj.snapshot_time)
      vdi.$snapshot_of = link(obj, 'snapshot_of')
    }

    if (!obj.managed) {
    } else if (!obj.managed) {
      vdi.type += '-unmanaged'
    }


@@ -1,10 +1,32 @@
import endsWith from 'lodash/endsWith'
import JSON5 from 'json5'
import limitConcurrency from 'limit-concurrency-decorator'
import { BaseError } from 'make-error'
import {
  endsWith,
  findKey,
  forEach,
  get,
  identity,
  map,
  mapValues,
  mean,
  sum,
  uniq,
  zipWith,
} from 'lodash'

import { parseDateTime } from './xapi'

export class FaultyGranularity extends BaseError {}

// -------------------------------------------------------------------

// according to https://xapi-project.github.io/xen-api/metrics.html
// The values are stored at intervals of:
// - 5 seconds for the past 10 minutes
// - one minute for the past 2 hours
// - one hour for the past week
// - one day for the past year
const RRD_STEP_SECONDS = 5
const RRD_STEP_MINUTES = 60
const RRD_STEP_HOURS = 3600
@@ -17,6 +39,7 @@ const RRD_STEP_FROM_STRING = {
  days: RRD_STEP_DAYS,
}

// points = intervalInSeconds / step
const RRD_POINTS_PER_STEP = {
  [RRD_STEP_SECONDS]: 120,
  [RRD_STEP_MINUTES]: 120,
@@ -24,16 +47,6 @@ const RRD_POINTS_PER_STEP = {
  [RRD_STEP_DAYS]: 366,
}
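
// Editor's worked example (not in the diff): at the 'seconds' step, step = 5
// and RRD_POINTS_PER_STEP[5] = 120, i.e. 120 * 5 s = 10 minutes of retained
// history — matching the RRD retention intervals listed above.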

export class XapiStatsError extends BaseError {}

export class UnknownLegendFormat extends XapiStatsError {
  constructor (line) {
    super('Unknown legend line: ' + line)
  }
}

export class FaultyGranularity extends XapiStatsError {}

// -------------------------------------------------------------------
// Utils
// -------------------------------------------------------------------
@@ -47,353 +60,185 @@ function convertNanToNull (value) {
  return isNaN(value) ? null : value
}

async function getServerTimestamp (xapi, host) {
  const serverLocalTime = await xapi.call('host.get_servertime', host.$ref)
  return Math.floor(parseDateTime(serverLocalTime).getTime() / 1000)
async function getServerTimestamp (xapi, hostRef) {
  const serverLocalTime = await xapi.call('host.get_servertime', hostRef)
  return Math.floor(parseDateTime(serverLocalTime).getTime() / 1e3)
}

// -------------------------------------------------------------------
// Stats
// -------------------------------------------------------------------

function getNewHostStats () {
  return {
    cpus: [],
    pifs: {
      rx: [],
      tx: [],
    },
    load: [],
    memory: [],
    memoryFree: [],
    memoryUsed: [],
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
  map(dataRow, ({ values }) =>
    transformValue(convertNanToNull(values[legendIndex]))
  )

const combineStats = (stats, path, combineValues) =>
  zipWith(...map(stats, path), (...values) => combineValues(values))

// Browses the object in depth and initialises its properties
// The targetPath can be a string or an array containing the depth
// targetPath: [a, b, c] => a.b.c
const getValuesFromDepth = (obj, targetPath) => {
  if (typeof targetPath === 'string') {
    return (obj[targetPath] = [])
  }
}

function getNewVmStats () {
  return {
    cpus: [],
    vifs: {
      rx: [],
      tx: [],
    },
    xvds: {
      r: {},
      w: {},
    },
    memory: [],
    memoryFree: [],
    memoryUsed: [],
  }
}

// -------------------------------------------------------------------
// Stats legends
// -------------------------------------------------------------------

function getNewHostLegends () {
  return {
    cpus: [],
    pifs: {
      rx: [],
      tx: [],
    },
    load: null,
    memoryFree: null,
    memory: null,
  }
}

function getNewVmLegends () {
  return {
    cpus: [],
    vifs: {
      rx: [],
      tx: [],
    },
    xvds: {
      r: [],
      w: [],
    },
    memoryFree: null,
    memory: null,
  }
}

// Compute one legend line for one host
function parseOneHostLegend (hostLegend, type, index) {
  let resReg

  if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
    hostLegend.cpus[resReg[1]] = index
  } else if ((resReg = /^pif_eth([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
    if (resReg[2] === 'rx') {
      hostLegend.pifs.rx[resReg[1]] = index
    } else {
      hostLegend.pifs.tx[resReg[1]] = index
  forEach(targetPath, (path, key) => {
    if (obj[path] === undefined) {
      obj = obj[path] = targetPath.length - 1 === key ? [] : {}
      return
    }
  } else if (type === 'loadavg') {
    hostLegend.load = index
  } else if (type === 'memory_free_kib') {
    hostLegend.memoryFree = index
  } else if (type === 'memory_total_kib') {
    hostLegend.memory = index
  }
    obj = obj[path]
  })
  return obj
}

// Compute one legend line for one vm
function parseOneVmLegend (vmLegend, type, index) {
  let resReg
const testMetric = (test, type) =>
  typeof test === 'string'
    ? test === type
    : typeof test === 'function' ? test(type) : test.exec(type)

  if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
    vmLegend.cpus[resReg[1]] = index
  } else if ((resReg = /^vif_([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
    if (resReg[2] === 'rx') {
      vmLegend.vifs.rx[resReg[1]] = index
    } else {
      vmLegend.vifs.tx[resReg[1]] = index
    }
  } else if ((resReg = /^vbd_xvd(.)_(read|write)$/.exec(type))) {
    if (resReg[2] === 'read') {
      vmLegend.xvds.r[resReg[1]] = index
    } else {
      vmLegend.xvds.w[resReg[1]] = index
    }
  } else if (type === 'memory_internal_free') {
    vmLegend.memoryFree = index
  } else if (endsWith(type, 'memory')) {
    vmLegend.memory = index
  }
}
const findMetric = (metrics, metricType) => {
  let testResult
  let metric

// Compute Stats Legends for host and vms from RRD update
function parseLegends (json) {
  const hostLegends = getNewHostLegends()
  const vmsLegends = {}
  forEach(metrics, (current, key) => {
    if (current.test === undefined) {
      const newValues = findMetric(current, metricType)

  json.meta.legend.forEach((value, index) => {
    const parsedLine = /^AVERAGE:(host|vm):(.+):(.+)$/.exec(value)

    if (parsedLine === null) {
      throw new UnknownLegendFormat(value)
    }

    const [, name, uuid, type] = parsedLine

    if (name !== 'vm') {
      parseOneHostLegend(hostLegends, type, index)
    } else {
      if (vmsLegends[uuid] === undefined) {
        vmsLegends[uuid] = getNewVmLegends()
      metric = newValues.metric
      if (metric !== undefined) {
        testResult = newValues.testResult
        return false
      }

      parseOneVmLegend(vmsLegends[uuid], type, index)
    } else if ((testResult = testMetric(current.test, metricType))) {
      metric = current
      return false
    }
  })

  return [hostLegends, vmsLegends]
  return { metric, testResult }
}

// -------------------------------------------------------------------

// The metrics:
// test: can be a function, regexp or string, default to: currentKey
// getPath: default to: () => currentKey
// transformValue: default to: identity
const STATS = {
  host: {
    load: {
      test: 'loadavg',
    },
    memoryFree: {
      test: 'memory_free_kib',
      transformValue: value => value * 1024,
    },
    memory: {
      test: 'memory_total_kib',
      transformValue: value => value * 1024,
    },
    cpus: {
      test: /^cpu(\d+)$/,
      getPath: matches => ['cpus', matches[1]],
      transformValue: value => value * 1e2,
    },
    pifs: {
      rx: {
        test: /^pif_eth(\d+)_rx$/,
        getPath: matches => ['pifs', 'rx', matches[1]],
      },
      tx: {
        test: /^pif_eth(\d+)_tx$/,
        getPath: matches => ['pifs', 'tx', matches[1]],
      },
    },
    iops: {
      r: {
        test: /^iops_read_(\w+)$/,
        getPath: matches => ['iops', 'r', matches[1]],
      },
      w: {
        test: /^iops_write_(\w+)$/,
        getPath: matches => ['iops', 'w', matches[1]],
      },
    },
    ioThroughput: {
      r: {
        test: /^io_throughput_read_(\w+)$/,
        getPath: matches => ['ioThroughput', 'r', matches[1]],
        transformValue: value => value * 2 ** 20,
      },
      w: {
        test: /^io_throughput_write_(\w+)$/,
        getPath: matches => ['ioThroughput', 'w', matches[1]],
        transformValue: value => value * 2 ** 20,
      },
    },
    latency: {
      r: {
        test: /^read_latency_(\w+)$/,
        getPath: matches => ['latency', 'r', matches[1]],
        transformValue: value => value / 1e3,
      },
      w: {
        test: /^write_latency_(\w+)$/,
        getPath: matches => ['latency', 'w', matches[1]],
        transformValue: value => value / 1e3,
      },
    },
    iowait: {
      test: /^iowait_(\w+)$/,
      getPath: matches => ['iowait', matches[1]],
      transformValue: value => value * 1e2,
    },
  },
  vm: {
    memoryFree: {
      test: 'memory_internal_free',
      transformValue: value => value * 1024,
    },
    memory: {
      test: metricType => endsWith(metricType, 'memory'),
    },
    cpus: {
      test: /^cpu(\d+)$/,
      getPath: matches => ['cpus', matches[1]],
      transformValue: value => value * 1e2,
    },
    vifs: {
      rx: {
        test: /^vif_(\d+)_rx$/,
        getPath: matches => ['vifs', 'rx', matches[1]],
      },
      tx: {
        test: /^vif_(\d+)_tx$/,
        getPath: matches => ['vifs', 'tx', matches[1]],
      },
    },
    xvds: {
      r: {
        test: /^vbd_xvd(.)_read$/,
        getPath: matches => ['xvds', 'r', matches[1]],
      },
      w: {
        test: /^vbd_xvd(.)_write$/,
        getPath: matches => ['xvds', 'w', matches[1]],
      },
    },
  },
}
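
// Editor's sketch of how a legend resolves against STATS (hypothetical call,
// not in the diff): for metricType = 'pif_eth0_rx',
// findMetric(STATS.host, 'pif_eth0_rx') matches the /^pif_eth(\d+)_rx$/ test,
// so testResult[1] === '0' and getPath(testResult) yields
// ['pifs', 'rx', '0'] — the storage path for that series in _statsByObject.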

// -------------------------------------------------------------------

export default class XapiStats {
  constructor () {
    this._vms = {}
    this._hosts = {}
    this._statsByObject = {}
  }

  // -------------------------------------------------------------------
  // Remove stats (Helper)
  // -------------------------------------------------------------------

  _removeOlderStats (source, dest, pointsPerStep) {
    for (const key in source) {
      if (key === 'cpus') {
        for (const cpuIndex in source.cpus) {
          dest.cpus[cpuIndex].splice(
            0,
            dest.cpus[cpuIndex].length - pointsPerStep
          )
        }

        // If the number of cpus has been decreased, remove !
        let offset

        if ((offset = dest.cpus.length - source.cpus.length) > 0) {
          dest.cpus.splice(-offset)
        }
      } else if (endsWith(key, 'ifs')) {
        // For each pif or vif
        for (const ifType in source[key]) {
          for (const pifIndex in source[key][ifType]) {
            dest[key][ifType][pifIndex].splice(
              0,
              dest[key][ifType][pifIndex].length - pointsPerStep
            )
          }

          // If the number of pifs has been decreased, remove !
          let offset

          if (
            (offset = dest[key][ifType].length - source[key][ifType].length) > 0
          ) {
            dest[key][ifType].splice(-offset)
          }
        }
      } else if (key === 'xvds') {
        for (const xvdType in source.xvds) {
          for (const xvdLetter in source.xvds[xvdType]) {
            dest.xvds[xvdType][xvdLetter].splice(
              0,
              dest.xvds[xvdType][xvdLetter].length - pointsPerStep
            )
          }

          // If the number of xvds has been decreased, remove !
          // FIXME
        }
      } else if (key === 'load') {
        dest.load.splice(0, dest[key].length - pointsPerStep)
      } else if (key === 'memory') {
        // Load, memory, memoryFree, memoryUsed
        const length = dest.memory.length - pointsPerStep
        dest.memory.splice(0, length)
        dest.memoryFree.splice(0, length)
        dest.memoryUsed.splice(0, length)
      }
    }
  }

  // -------------------------------------------------------------------
  // HOST: Computation and stats update
  // -------------------------------------------------------------------

  // Compute one stats row for one host
  _parseRowHostStats (hostLegends, hostStats, values) {
    // Cpus
    hostLegends.cpus.forEach((cpuIndex, index) => {
      if (hostStats.cpus[index] === undefined) {
        hostStats.cpus[index] = []
      }

      hostStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Pifs
    for (const pifType in hostLegends.pifs) {
      hostLegends.pifs[pifType].forEach((pifIndex, index) => {
        if (hostStats.pifs[pifType][index] === undefined) {
          hostStats.pifs[pifType][index] = []
        }

        hostStats.pifs[pifType][index].push(convertNanToNull(values[pifIndex]))
      })
    }

    // Load
    hostStats.load.push(convertNanToNull(values[hostLegends.load]))

    // Memory.
    // WARNING! memory/memoryFree are in kB.
    const memory = values[hostLegends.memory] * 1024
    const memoryFree = values[hostLegends.memoryFree] * 1024

    hostStats.memory.push(memory)

    if (hostLegends.memoryFree !== undefined) {
      hostStats.memoryFree.push(memoryFree)
      hostStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Compute stats for host from RRD update
  _parseHostStats (json, hostname, hostLegends, step) {
    const host = this._hosts[hostname][step]

    if (host.stats === undefined) {
      host.stats = getNewHostStats()
    }

    for (const row of json.data) {
      this._parseRowHostStats(hostLegends, host.stats, row.values)
    }
  }

  // -------------------------------------------------------------------
  // VM: Computation and stats update
  // -------------------------------------------------------------------

  // Compute stats for vms from RRD update
  _parseRowVmStats (vmLegends, vmStats, values) {
    // Cpus
    vmLegends.cpus.forEach((cpuIndex, index) => {
      if (vmStats.cpus[index] === undefined) {
        vmStats.cpus[index] = []
      }

      vmStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Vifs
    for (const vifType in vmLegends.vifs) {
      vmLegends.vifs[vifType].forEach((vifIndex, index) => {
        if (vmStats.vifs[vifType][index] === undefined) {
          vmStats.vifs[vifType][index] = []
        }

        vmStats.vifs[vifType][index].push(convertNanToNull(values[vifIndex]))
      })
    }

    // Xvds
    for (const xvdType in vmLegends.xvds) {
      for (const index in vmLegends.xvds[xvdType]) {
        if (vmStats.xvds[xvdType][index] === undefined) {
          vmStats.xvds[xvdType][index] = []
        }

        vmStats.xvds[xvdType][index].push(
          convertNanToNull(values[vmLegends.xvds[xvdType][index]])
        )
      }
    }

    // Memory
    // WARNING! memoryFree is in kB, memory is in B
    const memory = values[vmLegends.memory]
    const memoryFree = values[vmLegends.memoryFree] * 1024

    vmStats.memory.push(memory)

    if (vmLegends.memoryFree !== undefined) {
      vmStats.memoryFree.push(memoryFree)
      vmStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Compute stats for vms
  _parseVmsStats (json, hostname, vmsLegends, step) {
    if (this._vms[hostname][step] === undefined) {
      this._vms[hostname][step] = {}
    }

    const vms = this._vms[hostname][step]

    for (const uuid in vmsLegends) {
      if (vms[uuid] === undefined) {
        vms[uuid] = getNewVmStats()
      }
    }

    for (const row of json.data) {
      for (const uuid in vmsLegends) {
        this._parseRowVmStats(vmsLegends[uuid], vms[uuid], row.values)
      }
    }
  }

  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Executes one HTTP request on a XenServer to get stats
  // Returns stats (JSON format) or throws a got exception
  @limitConcurrency(3)
@@ -411,40 +256,46 @@ export default class XapiStats {
      .then(response => response.readAll().then(JSON5.parse))
  }

  async _getLastTimestamp (xapi, host, step) {
    if (this._hosts[host.address][step] === undefined) {
      const serverTimeStamp = await getServerTimestamp(xapi, host)
      return serverTimeStamp - step * RRD_POINTS_PER_STEP[step] + step
    }
  async _getNextTimestamp (xapi, host, step) {
    const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)
    const maxDuration = step * RRD_POINTS_PER_STEP[step]
    const lastTimestamp = get(this._statsByObject, [
      host.uuid,
      step,
      'endTimestamp',
    ])

    return this._hosts[host.address][step].endTimestamp
    if (
      lastTimestamp === undefined ||
      currentTimeStamp - lastTimestamp + step > maxDuration
    ) {
      return currentTimeStamp - maxDuration + step
    }
    return lastTimestamp
  }

  _getPoints (hostname, step, vmId) {
    const hostStats = this._hosts[hostname][step]
  _getStats (hostUuid, step, vmUuid) {
    const hostStats = this._statsByObject[hostUuid][step]

    // Return host points
    if (vmId === undefined) {
    // Return host stats
    if (vmUuid === undefined) {
      return {
        interval: step,
        ...hostStats,
      }
    }

    const vmsStats = this._vms[hostname][step]

    // Return vm points
    // Return vm stats
    return {
      interval: step,
      endTimestamp: hostStats.endTimestamp,
      stats: (vmsStats && vmsStats[vmId]) || getNewVmStats(),
      ...this._statsByObject[vmUuid][step],
    }
  }

  async _getAndUpdatePoints (xapi, host, vmId, granularity) {
    // Get granularity to use
  async _getAndUpdateStats (xapi, { host, vmUuid, granularity }) {
    const step =
      granularity === undefined || granularity === 0
      granularity === undefined
        ? RRD_STEP_SECONDS
        : RRD_STEP_FROM_STRING[granularity]

@@ -455,59 +306,21 @@ export default class XapiStats {
    }

    // Limit the number of http requests
    const hostname = host.address

    if (this._hosts[hostname] === undefined) {
      this._hosts[hostname] = {}
      this._vms[hostname] = {}
    }
    const hostUuid = host.uuid

    if (
      this._hosts[hostname][step] !== undefined &&
      this._hosts[hostname][step].localTimestamp + step > getCurrentTimestamp()
      get(this._statsByObject, [hostUuid, step, 'localTimestamp']) + step >
      getCurrentTimestamp()
    ) {
      return this._getPoints(hostname, step, vmId)
      return this._getStats(hostUuid, step, vmUuid)
    }

    // Check if we are in the good interval, use this._hosts[hostname][step].localTimestamp
    // for avoid bad requests
    // TODO

    // Get json
    const timestamp = await this._getLastTimestamp(xapi, host, step)
    let json = await this._getJson(xapi, host, timestamp)

    // Check if the granularity is linked to 'step'
    // If it's not the case, we retry other url with the json timestamp
    const timestamp = await this._getNextTimestamp(xapi, host, step)
    const json = await this._getJson(xapi, host, timestamp)
    if (json.meta.step !== step) {
      console.log(
        `RRD call: Expected step: ${step}, received step: ${
          json.meta.step
        }. Retry with other timestamp`
      throw new FaultyGranularity(
        `Unable to get the true granularity: ${json.meta.step}`
      )
      const serverTimestamp = await getServerTimestamp(xapi, host)

      // Approximately: half points are asked
      // FIXME: Not the best solution
      json = await this._getJson(
        xapi,
        host,
        serverTimestamp - step * (RRD_POINTS_PER_STEP[step] / 2) + step
      )

      if (json.meta.step !== step) {
        throw new FaultyGranularity(
          `Unable to get the true granularity: ${json.meta.step}`
        )
      }
    }

    // Make new backup slot if necessary
    if (this._hosts[hostname][step] === undefined) {
      this._hosts[hostname][step] = {
        endTimestamp: 0,
        localTimestamp: 0,
      }
    }

    // It exists data
@@ -516,70 +329,133 @@ export default class XapiStats {
      // timestamp of the oldest data value
      // So, we use the timestamp of the oldest data value !
      const startTimestamp = json.data[json.meta.rows - 1].t
      const endTimestamp = get(this._statsByObject, [
        hostUuid,
        step,
        'endTimestamp',
      ])

      // Remove useless data and reorder
      // Note: Older values are at end of json.data.row
      const parseOffset =
        (this._hosts[hostname][step].endTimestamp - startTimestamp + step) /
        step

      json.data.splice(json.data.length - parseOffset)
      json.data.reverse()
      const statsOffset = endTimestamp - startTimestamp + step
      if (endTimestamp !== undefined && statsOffset > 0) {
        const parseOffset = statsOffset / step
        // Remove useless data
        // Note: Older values are at end of json.data.row
        json.data.splice(json.data.length - parseOffset)
      }

      // It exists useful data
      if (json.data.length > 0) {
        const [hostLegends, vmsLegends] = parseLegends(json)

        // Compute and update host/vms stats
        this._parseVmsStats(json, hostname, vmsLegends, step)
        this._parseHostStats(json, hostname, hostLegends, step)

        // Remove older stats
        this._removeOlderStats(
          hostLegends,
          this._hosts[hostname][step].stats,
          RRD_POINTS_PER_STEP[step]
        )

        for (const uuid in vmsLegends) {
          this._removeOlderStats(
            vmsLegends[uuid],
            this._vms[hostname][step][uuid],
            RRD_POINTS_PER_STEP[step]
        // reorder data
        json.data.reverse()
        forEach(json.meta.legend, (legend, index) => {
          const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
            legend
          )
        }

          const metrics = STATS[type]
          if (metrics === undefined) {
            return
          }

          const { metric, testResult } = findMetric(metrics, metricType)

          if (metric === undefined) {
            return
          }

          const path =
            metric.getPath !== undefined
              ? metric.getPath(testResult)
              : [findKey(metrics, metric)]

          const metricValues = getValuesFromDepth(this._statsByObject, [
            uuid,
            step,
            'stats',
            ...path,
          ])

          metricValues.push(
            ...computeValues(json.data, index, metric.transformValue)
          )

          // remove older Values
          metricValues.splice(
            0,
            metricValues.length - RRD_POINTS_PER_STEP[step]
          )
        })
      }
    }

    // Update timestamp
    this._hosts[hostname][step].endTimestamp = json.meta.end
    this._hosts[hostname][step].localTimestamp = getCurrentTimestamp()

    return this._getPoints(hostname, step, vmId)
    const hostStats = this._statsByObject[hostUuid][step]
    hostStats.endTimestamp = json.meta.end
    hostStats.localTimestamp = getCurrentTimestamp()
    return this._getStats(hostUuid, step, vmUuid)
  }

  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Warning: these functions return a reference to internal data,
  // so the data can be changed by a parallel call to these functions.
  // It is forbidden to modify the returned data

  // Return host stats
  async getHostPoints (xapi, hostId, granularity) {
    const host = xapi.getObject(hostId)
    return this._getAndUpdatePoints(xapi, host, undefined, granularity)
  getHostStats (xapi, hostId, granularity) {
    return this._getAndUpdateStats(xapi, {
      host: xapi.getObject(hostId),
      granularity,
    })
  }

  // Return vms stats
  async getVmPoints (xapi, vmId, granularity) {
  getVmStats (xapi, vmId, granularity) {
    const vm = xapi.getObject(vmId)
    const host = vm.$resident_on

    if (!host) {
      throw new Error(`VM ${vmId} is halted or host could not be found.`)
    }

    return this._getAndUpdatePoints(xapi, host, vm.uuid, granularity)
    return this._getAndUpdateStats(xapi, {
      host,
      vmUuid: vm.uuid,
      granularity,
    })
  }

  async getSrStats (xapi, srId, granularity) {
    const sr = xapi.getObject(srId)

    const hostsStats = {}
    await Promise.all(
      map(uniq(map(sr.$PBDs, 'host')), hostId =>
        this.getHostStats(xapi, hostId, granularity).then(stats => {
          hostsStats[xapi.getObject(hostId).name_label] = stats
        })
      )
    )

    const srShortUUID = sr.uuid.slice(0, 8)
    return {
      interval: hostsStats[Object.keys(hostsStats)[0]].interval,
      endTimestamp: Math.max(...map(hostsStats, 'endTimestamp')),
      localTimestamp: Math.min(...map(hostsStats, 'localTimestamp')),
      stats: {
        iops: {
          r: combineStats(hostsStats, `stats.iops.r[${srShortUUID}]`, sum),
          w: combineStats(hostsStats, `stats.iops.w[${srShortUUID}]`, sum),
        },
        ioThroughput: {
          r: combineStats(
            hostsStats,
            `stats.ioThroughput.r[${srShortUUID}]`,
            sum
          ),
          w: combineStats(
            hostsStats,
            `stats.ioThroughput.w[${srShortUUID}]`,
            sum
          ),
        },
        latency: {
          r: combineStats(hostsStats, `stats.latency.r[${srShortUUID}]`, mean),
          w: combineStats(hostsStats, `stats.latency.w[${srShortUUID}]`, mean),
        },
        iowait: mapValues(hostsStats, `stats.iowait[${srShortUUID}]`),
      },
    }
  }
}
|
||||
|
||||
@@ -50,6 +50,7 @@ import {

import mixins from './mixins'
import OTHER_CONFIG_TEMPLATE from './other-config-template'
import { type DeltaVmExport } from './'
import {
  asBoolean,
  asInteger,
@@ -265,8 +266,8 @@ export default class Xapi extends XapiBase {
return value === null
  ? removal
  : removal
      ::ignoreErrors()
      .then(() => this.call(add, ref, name, prepareXapiParam(value)))
        ::ignoreErrors()
        .then(() => this.call(add, ref, name, prepareXapiParam(value)))
}
})
)
@@ -425,6 +426,14 @@ export default class Xapi extends XapiBase {
  await this.call('host.restart_agent', this.getObject(hostId).$ref)
}

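// Point the host's syslog at a remote destination, then make XAPI
// reload the host's syslog configuration so the change takes effect.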
async setRemoteSyslogHost (hostId, syslogDestination) {
  const host = this.getObject(hostId)
  await this.call('host.set_logging', host.$ref, {
    syslog_destination: syslogDestination,
  })
  await this.call('host.syslog_reconfigure', host.$ref)
}

async shutdownHost (hostId, force = false) {
  const host = this.getObject(hostId)

@@ -516,9 +525,9 @@ export default class Xapi extends XapiBase {
const onVmCreation =
  nameLabel !== undefined
    ? vm =>
      targetXapi._setObjectProperties(vm, {
        nameLabel,
      })
      targetXapi._setObjectProperties(vm, {
        nameLabel,
      })
    : null

const vm = await targetXapi._getOrWaitObject(
@@ -632,7 +641,12 @@ export default class Xapi extends XapiBase {
)
}

async _deleteVm (vm, deleteDisks = true, force = false) {
async _deleteVm (
  vm,
  deleteDisks = true,
  force = false,
  forceDeleteDefaultTemplate = false
) {
  debug(`Deleting VM ${vm.name_label}`)

  const { $ref } = vm
@@ -653,6 +667,10 @@ export default class Xapi extends XapiBase {
vm = await this.barrier('VM', $ref)

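// Clearing other_config.default_template first (when requested)
// presumably lifts the protection XAPI puts on default templates,
// so that VM.destroy can succeed.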
return Promise.all([
  forceDeleteDefaultTemplate &&
    this._updateObjectMapProperty(vm, 'other_config', {
      default_template: null,
    }),
  this.call('VM.destroy', $ref),

  asyncMap(vm.$snapshots, snapshot =>
@@ -692,8 +710,13 @@ export default class Xapi extends XapiBase {
])
}

async deleteVm (vmId, deleteDisks, force) {
  return /* await */ this._deleteVm(this.getObject(vmId), deleteDisks, force)
async deleteVm (vmId, deleteDisks, force, forceDeleteDefaultTemplate) {
  return /* await */ this._deleteVm(
    this.getObject(vmId),
    deleteDisks,
    force,
    forceDeleteDefaultTemplate
  )
}

getVmConsole (vmId) {
@@ -801,12 +824,14 @@ export default class Xapi extends XapiBase {
} = {}
): Promise<DeltaVmExport> {
  let vm = this.getObject(vmId)
  if (!bypassVdiChainsCheck) {
    this._assertHealthyVdiChains(vm)
  }

  // do not use the snapshot name in the delta export
  const exportedNameLabel = vm.name_label
  if (!vm.is_a_snapshot) {
    if (!bypassVdiChainsCheck) {
      this._assertHealthyVdiChains(vm)
    }

    vm = await this._snapshotVm($cancelToken, vm, snapshotNameLabel)
    $defer.onFailure(() => this._deleteVm(vm))
  }
@@ -859,29 +884,30 @@ export default class Xapi extends XapiBase {
// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]

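// When a base snapshot exists (and base tags are enabled), the exported
// VDI is tagged with the base VDI's uuid so the import side knows which
// base to clone the delta onto.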
vdis[vdiRef] =
  baseVdi && !disableBaseTags
    ? {
      ...vdi,
      other_config: {
        ...vdi.other_config,
        [TAG_BASE_DELTA]: baseVdi.uuid,
      },
      $SR$uuid: vdi.$SR.uuid,
    }
    : {
      ...vdi,
      $SR$uuid: vdi.$SR.uuid,
    }
vdis[vdiRef] = {
  ...vdi,
  other_config: {
    ...vdi.other_config,
    [TAG_BASE_DELTA]:
      baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
  },
  $SR$uuid: vdi.$SR.uuid,
}

streams[`${vdiRef}.vhd`] = () =>
  this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
})

const vifs = {}
forEach(vm.$VIFs, vif => {
  const network = vif.$network
  vifs[vif.$ref] = {
    ...vif,
    $network$uuid: vif.$network.uuid,
    $network$uuid: network.uuid,
    $network$name_label: network.name_label,
    // https://github.com/babel/babel-eslint/issues/595
    // eslint-disable-next-line no-undef
    $network$VLAN: network.$PIFs[0]?.VLAN,
  }
})

@@ -897,9 +923,9 @@ export default class Xapi extends XapiBase {
other_config:
  baseVm && !disableBaseTags
    ? {
      ...vm.other_config,
      [TAG_BASE_DELTA]: baseVm.uuid,
    }
      ...vm.other_config,
      [TAG_BASE_DELTA]: baseVm.uuid,
    }
    : omit(vm.other_config, TAG_BASE_DELTA),
},
},
@@ -918,6 +944,7 @@ export default class Xapi extends XapiBase {
delta: DeltaVmExport,
{
  deleteBase = false,
  detectBase = true,
  disableStartAfterImport = true,
  mapVdisSrs = {},
  name_label = delta.vm.name_label,
@@ -930,17 +957,21 @@ export default class Xapi extends XapiBase {
  throw new Error(`Unsupported delta backup version: ${version}`)
}

const remoteBaseVmUuid = delta.vm.other_config[TAG_BASE_DELTA]
let baseVm
if (remoteBaseVmUuid) {
  baseVm = find(
    this.objects.all,
    obj =>
      (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid
  )
if (detectBase) {
  const remoteBaseVmUuid = delta.vm.other_config[TAG_BASE_DELTA]
  if (remoteBaseVmUuid) {
    baseVm = find(
      this.objects.all,
      obj =>
        (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid
    )

    if (!baseVm) {
      throw new Error('could not find the base VM')
    if (!baseVm) {
      throw new Error(
        `could not find the base VM (copy of ${remoteBaseVmUuid})`
      )
    }
  }
}

@@ -975,11 +1006,29 @@ export default class Xapi extends XapiBase {
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()

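// For each exported VDI: when it references a base VDI (delta import),
// the local copy of that base is cloned and the VHD delta imported on
// top of it; otherwise a brand-new VDI is created on the target SR.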
// 3. Create VDIs.
const newVdis = await map(delta.vdis, async vdi => {
  const remoteBaseVdiUuid = vdi.other_config[TAG_BASE_DELTA]
  if (!remoteBaseVdiUuid) {
    const newVdi = await this.createVdi({
// 3. Create VDIs & VBDs.
const vbds = groupBy(delta.vbds, 'VDI')
const newVdis = await map(delta.vdis, async (vdi, vdiId) => {
  let newVdi

  const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
  if (remoteBaseVdiUuid) {
    const baseVdi = find(
      baseVdis,
      vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
    )
    if (!baseVdi) {
      throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
    }

    newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
    $defer.onFailure(() => this._deleteVdi(newVdi))

    await this._updateObjectMapProperty(newVdi, 'other_config', {
      [TAG_COPY_SRC]: vdi.uuid,
    })
  } else {
    newVdi = await this.createVdi({
      ...vdi,
      other_config: {
        ...vdi.other_config,
@@ -989,47 +1038,40 @@ export default class Xapi extends XapiBase {
      sr: mapVdisSrs[vdi.uuid] || srId,
    })
    $defer.onFailure(() => this._deleteVdi(newVdi))

    return newVdi
  }

  const baseVdi = find(
    baseVdis,
    vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
  await asyncMap(vbds[vdiId], vbd =>
    this.createVbd({
      ...vbd,
      vdi: newVdi,
      vm,
    })
  )
  if (!baseVdi) {
    throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
  }

  const newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
  $defer.onFailure(() => this._deleteVdi(newVdi))

  await this._updateObjectMapProperty(newVdi, 'other_config', {
    [TAG_COPY_SRC]: vdi.uuid,
  })

  return newVdi
})::pAll()

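// Index the pool's networks by VLAN and name label so imported VIFs
// can be matched to an equivalent network; the last network seen also
// serves as the fallback default.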
const networksOnPoolMasterByDevice = {}
const networksByNameLabelByVlan = {}
let defaultNetwork
forEach(this.pool.$master.$PIFs, pif => {
  defaultNetwork = networksOnPoolMasterByDevice[pif.device] = pif.$network
forEach(this.objects.all, object => {
  if (object.$type === 'network') {
    const pif = object.$PIFs[0]
    if (pif === undefined) {
      // ignore network
      return
    }
    const vlan = pif.VLAN
    const networksByNameLabel =
      networksByNameLabelByVlan[vlan] ||
      (networksByNameLabelByVlan[vlan] = {})
    defaultNetwork = networksByNameLabel[object.name_label] = object
  }
})

const { streams } = delta
let transferSize = 0

await Promise.all([
  // Create VBDs.
  asyncMap(delta.vbds, vbd =>
    this.createVbd({
      ...vbd,
      vdi: newVdis[vbd.VDI],
      vm,
    })
  ),

  // Import VDI contents.
  asyncMap(newVdis, async (vdi, id) => {
    for (let stream of ensureArray(streams[`${id}.vhd`])) {
@@ -1051,10 +1093,21 @@ export default class Xapi extends XapiBase {

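// A VIF is attached to the first match: the network with the same
// UUID, else a network on the same VLAN with the same name label,
// else any network on that VLAN, else the default network.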
// Create VIFs.
asyncMap(delta.vifs, vif => {
  const network =
    (vif.$network$uuid && this.getObject(vif.$network$uuid, null)) ||
    networksOnPoolMasterByDevice[vif.device] ||
    defaultNetwork
  let network =
    vif.$network$uuid && this.getObject(vif.$network$uuid, undefined)

  if (network === undefined) {
    const { $network$VLAN: vlan = -1 } = vif
    const networksByNameLabel = networksByNameLabelByVlan[vlan]
    if (networksByNameLabel !== undefined) {
      network = networksByNameLabel[vif.$network$name_label]
      if (network === undefined) {
        network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
      }
    } else {
      network = defaultNetwork
    }
  }

  if (network) {
    return this._createVif(vm, network, vif)
@@ -1101,7 +1154,9 @@ export default class Xapi extends XapiBase {
vdis[vdi.$ref] =
  mapVdisSrs && mapVdisSrs[vdi.$id]
    ? hostXapi.getObject(mapVdisSrs[vdi.$id]).$ref
    : sr !== undefined ? hostXapi.getObject(sr).$ref : defaultSr.$ref // Will error if there is no default SR.
    : sr !== undefined
      ? hostXapi.getObject(sr).$ref
      : defaultSr.$ref // Will error if there is no default SR.
}
}

@@ -1273,7 +1328,7 @@ export default class Xapi extends XapiBase {
async _importOvaVm (
  $defer,
  stream,
  { descriptionLabel, disks, memory, nameLabel, networks, nCpus },
  { descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables },
  sr
) {
  // 1. Create VM.
@@ -1346,8 +1401,9 @@ export default class Xapi extends XapiBase {
  return
}

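// `tables` presumably maps each OVA entry name to its pre-parsed VMDK
// block table, letting vmdkToVhd produce a VHD stream that can be
// imported as VDI_FORMAT_VHD instead of RAW.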
const vhdStream = await vmdkToVhd(stream)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_RAW)
const table = tables[entry.name]
const vhdStream = await vmdkToVhd(stream, table)
await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)

// See: https://github.com/mafintosh/tar-stream#extracting
// No import parallelization.
@@ -1420,7 +1476,7 @@ export default class Xapi extends XapiBase {
  }
}

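// @concurrency(2) allows up to two snapshot operations at a time,
// where @synchronized() would have fully serialized them.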
@synchronized() // like @concurrency(1) but more efficient
@concurrency(2)
@cancelable
async _snapshotVm ($cancelToken, vm, nameLabel = vm.name_label) {
  debug(
@@ -1438,8 +1494,6 @@ export default class Xapi extends XapiBase {
    nameLabel
  ).then(extractOpaqueRef)
  this.addTag(ref, 'quiesce')::ignoreErrors()

  await this._waitObjectState(ref, vm => includes(vm.tags, 'quiesce'))
} catch (error) {
  const { code } = error
  if (
@@ -1463,7 +1517,7 @@ export default class Xapi extends XapiBase {
// to-date object.
const [, snapshot] = await Promise.all([
  this.call('VM.set_is_a_template', ref, false),
  this._waitObjectState(ref, snapshot => !snapshot.is_a_template),
  this.barrier(ref),
])

return snapshot
@@ -1995,7 +2049,9 @@ export default class Xapi extends XapiBase {
  name_label: name,
  name_description: description,
  MTU: asInteger(mtu),
  other_config: {},
  // Set automatic to false so XenCenter does not get confused
  // https://citrix.github.io/xenserver-sdk/#network
  other_config: { automatic: 'false' },
})
$defer.onFailure(() => this.call('network.destroy', networkRef))
if (pifId) {

@@ -10,19 +10,22 @@ type AugmentedReadable = Readable & {
type MaybeArray<T> = Array<T> | T

export type DeltaVmExport = {|
  streams: $Dict<() => Promise<AugmentedReadable>>,
  vbds: { [ref: string]: Object },
  vdis: { [ref: string]: {
  streams: $Dict < () => Promise < AugmentedReadable >>,
  vbds: { [ref: string]: Object },
  vdis: {
    [ref: string]: {
      $SR$uuid: string,
      snapshot_of: string,
    } },
      snapshot_of: string,
    }
  },
  version: '1.0.0',
  vifs: { [ref: string]: Object },
  vm: Vm,
  vm: Vm,
|}

export type DeltaVmImport = {|
  ...DeltaVmExport,
  streams: $Dict<MaybeArray<AugmentedReadable | () => Promise<AugmentedReadable>>>,
  streams: $Dict < MaybeArray < AugmentedReadable | () => Promise < AugmentedReadable >>>,
|}

declare class XapiObject {
@@ -32,8 +35,17 @@ declare class XapiObject {
}

type Id = string | XapiObject

declare export class Vbd extends XapiObject {
  type: string;
  VDI: string;
}

declare export class Vm extends XapiObject {
  $snapshots: Vm[];
  $VBDs: Vbd[];
  is_a_snapshot: boolean;
  is_a_template: boolean;
  name_label: string;
  other_config: $Dict<string>;
  snapshot_time: number;
@@ -43,6 +55,7 @@ declare export class Vm extends XapiObject {
declare export class Xapi {
  objects: { all: $Dict<Object> };

  _assertHealthyVdiChains(vm: Vm): void;
  _importVm(
    cancelToken: mixed,
    stream: AugmentedReadable,
@@ -52,7 +65,7 @@ declare export class Xapi {
  _updateObjectMapProperty(
    object: XapiObject,
    property: string,
    entries: $Dict<string>
    entries: $Dict<null | string>
  ): Promise<void>;
  _setObjectProperties(
    object: XapiObject,
@@ -65,17 +78,17 @@ declare export class Xapi {
  barrier(ref: string): XapiObject;
  deleteVm(vm: Id): Promise<void>;
  editVm(vm: Id, $Dict<mixed>): Promise<void>;
  exportDeltaVm(
    cancelToken: mixed,
    snapshot: Id,
    baseSnapshot?: Id
  ): Promise<DeltaVmExport>;
  exportVm(
    cancelToken: mixed,
    vm: Vm,
    options?: Object
  ): Promise<AugmentedReadable>;
  getObject(object: Id): XapiObject;
  importDeltaVm(data: DeltaVmImport, options: Object): Promise<{ vm: Vm }>;
  importVm(stream: AugmentedReadable, options: Object): Promise<Vm>;
  exportDeltaVm(
    cancelToken: mixed,
    snapshot: Id,
    baseSnapshot ?: Id
  ): Promise<DeltaVmExport>;
  exportVm(
    cancelToken: mixed,
    vm: Vm,
    options ?: Object
  ): Promise<AugmentedReadable>;
  getObject(object: Id): XapiObject;
  importDeltaVm(data: DeltaVmImport, options: Object): Promise<{ vm: Vm }>;
  importVm(stream: AugmentedReadable, options: Object): Promise<Vm>;
}

@@ -1,9 +1,10 @@
import deferrable from 'golike-defer'
import every from 'lodash/every'
import find from 'lodash/find'
import filter from 'lodash/filter'
import find from 'lodash/find'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
@@ -11,6 +12,7 @@ import unzip from 'julien-f-unzip'

import { debounce } from '../../decorators'
import {
  asyncMap,
  ensureArray,
  forEach,
  mapFilter,
@@ -149,9 +151,12 @@ export default {
},

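// XCP-ng hosts are patched through yum rather than through Citrix pool
// patches, hence the product_brand checks below.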
async listMissingPoolPatchesOnHost (hostId) {
  const host = this.getObject(hostId)
  // Returns an array to not break compatibility.
  return mapToArray(
    await this._listMissingPoolPatchesOnHost(this.getObject(hostId))
    await (host.software_version.product_brand === 'XCP-ng'
      ? this._xcpListHostUpdates(host)
      : this._listMissingPoolPatchesOnHost(host))
  )
},

@@ -440,15 +445,21 @@ export default {
},

async installAllPoolPatchesOnHost (hostId) {
  let host = this.getObject(hostId)
  const host = this.getObject(hostId)
  if (host.software_version.product_brand === 'XCP-ng') {
    return this._xcpInstallHostUpdates(host)
  }
  return this._installAllPoolPatchesOnHost(host)
},

async _installAllPoolPatchesOnHost (host) {
  const installableByUuid =
    host.license_params.sku_type !== 'free'
      ? await this._listMissingPoolPatchesOnHost(host)
      : filter(await this._listMissingPoolPatchesOnHost(host), {
        paid: false,
        upgrade: false,
      })
      : pickBy(await this._listMissingPoolPatchesOnHost(host), {
        paid: false,
        upgrade: false,
      })

  // List of all installable patches sorted from the newest to the
  // oldest.
@@ -479,6 +490,13 @@ export default {
},

async installAllPoolPatchesOnAllHosts () {
  if (this.pool.$master.software_version.product_brand === 'XCP-ng') {
    return this._xcpInstallAllPoolUpdatesOnHost()
  }
  return this._installAllPoolPatchesOnAllHosts()
},

async _installAllPoolPatchesOnAllHosts () {
  const installableByUuid = assign(
    {},
    ...(await Promise.all(
@@ -488,7 +506,7 @@ export default {
      patches =>
        host.license_params.sku_type !== 'free'
          ? patches
          : filter(patches, { paid: false, upgrade: false })
          : pickBy(patches, { paid: false, upgrade: false })
    )
  }
})
@@ -518,4 +536,47 @@ export default {
  })
}
},

// ----------------------------------
// XCP-ng dedicated zone for patching
// ----------------------------------

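// These helpers rely on the XAPI plugin mechanism: host.call_plugin
// runs updater.py on the host and returns its output as a JSON string,
// whose `exit` field presumably carries the yum exit status.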
// list all yum updates available for an XCP-ng host
async _xcpListHostUpdates (host) {
  return JSON.parse(
    await this.call(
      'host.call_plugin',
      host.$ref,
      'updater.py',
      'check_update',
      {}
    )
  )
},

// install all yum updates for an XCP-ng host
async _xcpInstallHostUpdates (host) {
  const update = await this.call(
    'host.call_plugin',
    host.$ref,
    'updater.py',
    'update',
    {}
  )

  if (JSON.parse(update).exit !== 0) {
    throw new Error('Update install failed')
  } else {
    await this._updateObjectMapProperty(host, 'other_config', {
      rpm_patch_installation_time: String(Date.now() / 1000),
    })
  }
},

// install all yum updates for all XCP-ng hosts in a given pool
async _xcpInstallAllPoolUpdatesOnHost () {
  await asyncMap(filter(this.objects.all, { $type: 'host' }), host =>
    this._xcpInstallHostUpdates(host)
  )
},
}

@@ -310,11 +310,7 @@ export default {

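// The new setter passes the restart priority through as-is instead of
// mapping a boolean to 'restart' / ''.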
highAvailability: {
  set (ha, vm) {
    return this.call(
      'VM.set_ha_restart_priority',
      vm.$ref,
      ha ? 'restart' : ''
    )
    return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
  },
},

@@ -384,6 +380,8 @@ export default {

tags: true,

hasVendorDevice: true,

vga: {
  set (vga, vm) {
    if (!includes(XEN_VGA_VALUES, vga)) {

@@ -154,7 +154,8 @@ export default class {
id: await generateToken(),
user_id: userId,
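// expiresIn may be a duration string understood by ms() (e.g. '7 days')
// or a number of milliseconds.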
expiration:
  Date.now() + (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
  Date.now() +
    (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
})

await this._tokens.add(token)
