Compare commits: xen-api-v0...xo-server-

277 commits (SHA1):

```
bcde9e0f74 114501ebc7 ebab7c0867 0e2270fb6e 593493ec0c d92898a806 7890e46551
ef942a6209 fdde916388 31314d201b a29a949c51 cc1ce8c5f8 a21bf4ebe5 3d0420dbd9
04c74dd30f 2f256291ae bcb66a4145 2d9368062e b110bacf61 78afdc0af5 ad6cd7985a
a61661776d 1a9ebddcab 7ab907a854 68a34f7cdb da4ff3082d 9c05a59b5f 6780146505
2758833fc6 2786d7ec46 945a2006c9 b9e574e32f 34f1ef1680 4ac4310bc1 a10997ca66
0e52a4c7dc a4b3e22c2b 441bd7c754 ddbd32d1cb a5b0cbeaea c6f3b2b1ce 3d869d9fa1
7a5229741f 78e0c2d8fa 5928984069 61a472f108 e45f78ea20 b3ae9d88eb f7f26537be
96848fc6d4 51e6f0c79f 4f94ad40b7 43e1eb9939 1f6d7de861 bd623c2daf 40c71c2102
72a1580eff 9e2404a0d7 7dd84d1518 d800db5d09 2714ccff38 1d493e411b 2a0c222f2d
641d68de0e 2dd0fd660b bb5441c7bc eeea9e662b 8d4874e240 a8ba4a1a8e 0c027247ec
164cb39c1b 52503de645 83b8b5de61 3e326c4e62 a6b0690416 dcd007c5c7 eb090e4874
4b716584f7 4bc348f39f 9c75992fe4 4bb2702ac5 ea8133cb41 fc40c7b03d 7fe5b66fdb
0f1d052493 56a182f795 e8da1b943b 3913b0eba1 7990e45095 a7068ec166 55b35ac0cf
a251f8ca75 172ce2c7a1 3cef668a75 e6deb29070 51609d45a2 5cb6dc6d92 c5174a61b7
93e987982c fc421428fd 7400bd657a da62cba3f8 461cc7e547 b898ed4785 149530e73f
7e627c953e bc86984f19 e40f3acdd4 63d93224e0 c87356c319 74f4a83aea e67038a04d
1fa73b57a2 73c746fdd3 ab1413b741 c087eaf229 8b9f9ffa3e a83fa90d87 505f06c1d8
2ac1093543 b3d8ce2041 b47789bf82 0a5e1a9bce f333679319 20d3faa306 cf11ed0830
acd390ac42 8a2fbe3ab5 7a6e7ec153 7d90346c91 abb5193ced 52e845834e c1c17fad44
d7b4025893 934356571c 738d98eb42 7e689076d8 0b9d031965 53f470518b 664d648435
0d718bd632 ed5e0c3509 20d5047b55 4cfe3ec06e 87664ff16a adf278fc83 a4d0fa62d2
ff59d091f1 4cac99d79a d1a046279d cb9fa5c42b 05f9e6895b 63b5ee6f96 36d2de049f
86b0d5e2b7 d34f641130 39d7b4c7bd ad0d4156fb 80187e2789 89e25c9b81 ca51d59815
433f445e99 474a765e1b 7d4b17380d b58b1d94cd 16e7257e3b ca1a46f980 596bd12f59
301ab65c01 35f210e074 c239b518e0 f45935aa44 782505b292 1368e3b86d ab9c24401e
831f4e48d1 f5511449af 80c1e39b53 3ce4e86784 fb617418bb 9fb0f793b2 3b21a097ab
ef09a42a89 74d8f2a859 48910f9c0f 788a1accbd b254e7e852 e288fa1b8a eb9ec68494
10ab4f2d79 b1986dc275 831e36ae5f 77a2d37d98 37b90e25dc 41f16846b6 3e89c62e72
b7d3762c06 481bc9430a 13f2470887 0308fe4e6e 197273193e e4b11a793b 927d3135c4
aa533c20d6 7fd615525a 6abf3fc0af 6bb0929822 feebc04e55 2d406cd7c1 788bfe632f
1149102f37 8bd949f618 489b142a66 cbbbb6da4f 6701c7e3af ecd460a991 b4d7648ffe
eb3dfb0f30 2b9ba69480 8f784162ea a2ab64b142 052817ccbf 48b2297bc1 e76a0ad4bd
baf6d30348 7d250dd90b efaabb02e8 0c3b98d451 28d1539ea6 8ad02d2d51 1947a066e0
d99e643634 65e1ac2ef9 64a768090f 488eed046e dccddd78a6 3c247abcf9 db795e91fd
f060f56c93 51be573f5e 4257cbb618 e25d6b712d b499d60130 68e06303a4 60085798f2
c62cab39f1 30483ab2d9 c38c716616 ded1127d64 38d6130e89 ee47e40d1a 80e66415d7
81e6372070 dbfbd42d29 e0d34b1747 9a8f9dd1d7 75521f8757 11d4cb2f04 d90cb09b56
a02d393457 01a5963947 7ef314d9f4 2ff25d1f61 ede12b6732 8a010f62fd 51da4a7e70
fd2580f5da c5fdab7d47 ae094438b1 3e5af9e894 10093afb91 58032738b9 89cbbaeeea
5ca08eb400 fad049d2ac 87466cb5bd bb69ad8019
```
```diff
@@ -2,13 +2,21 @@ module.exports = {
   extends: ['standard', 'standard-jsx'],
   globals: {
     __DEV__: true,
     $Dict: true,
     $Diff: true,
     $Exact: true,
     $Keys: true,
     $PropertyType: true,
     $Shape: true,
   },
   parser: 'babel-eslint',
   rules: {
     'comma-dangle': ['error', 'always-multiline'],
     indent: 'off',
     'no-var': 'error',
     'node/no-extraneous-import': 'error',
     'node/no-extraneous-require': 'error',
     'prefer-const': 'error',
     'react/jsx-indent': 'off',
   },
 }
```
```diff
@@ -8,6 +8,7 @@
 [lints]

 [options]
 esproposal.decorators=ignore
+include_warnings=true
 module.use_strict=true
```
**.gitignore** (2 changes)

```diff
@@ -8,6 +8,8 @@
 /packages/*/dist/
 /packages/*/node_modules/

+/packages/vhd-cli/src/commands/index.js
+
 /packages/xen-api/plot.dat

 /packages/xo-server/.xo-server.*
```
```diff
@@ -1,4 +1,5 @@
 module.exports = {
   semi: false,
   singleQuote: true,
+  trailingComma: 'es5',
 }
```
**.travis.yml** (12 changes)

```diff
@@ -1,12 +1,18 @@
 language: node_js
 node_js:
-  - stable
+  #- stable # disable for now due to an issue of indirect dep upath with Node 9
+  - 8
+  - 6

 # Use containers.
 # http://docs.travis-ci.com/user/workers/container-based-infrastructure/
 sudo: false
 addons:
   apt:
     packages:
       - qemu-utils
       - blktap-utils
       - vmdk-stream-converter

 before_install:
   - curl -o- -L https://yarnpkg.com/install.sh | bash
@@ -14,3 +20,7 @@ before_install:

 cache:
   yarn: true

 script:
   - yarn run test
+  - yarn run test-integration
```
**@xen-orchestra/babel-config/index.js** (new file)

```diff
@@ -0,0 +1,47 @@
+'use strict'
+
+const PLUGINS_RE = /^(?:@babel\/plugin-.+|babel-plugin-lodash)$/
+const PRESETS_RE = /^@babel\/preset-.+$/
+
+const NODE_ENV = process.env.NODE_ENV || 'development'
+const __PROD__ = NODE_ENV === 'production'
+const __TEST__ = NODE_ENV === 'test'
+
+module.exports = function (pkg, plugins, presets) {
+  plugins === undefined && (plugins = {})
+
+  presets === undefined && (presets = {})
+  presets['@babel/preset-env'] = {
+    debug: !__TEST__,
+    loose: true,
+    shippedProposals: true,
+    targets: __PROD__
+      ? (() => {
+          let node = (pkg.engines || {}).node
+          if (node !== undefined) {
+            const trimChars = '^=>~'
+            while (trimChars.includes(node[0])) {
+              node = node.slice(1)
+            }
+            return { node: node }
+          }
+        })()
+      : { browsers: '', node: 'current' },
+    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
+  }
+
+  Object.keys(pkg.devDependencies || {}).forEach(name => {
+    if (!(name in presets) && PLUGINS_RE.test(name)) {
+      plugins[name] = {}
+    } else if (!(name in presets) && PRESETS_RE.test(name)) {
+      presets[name] = {}
+    }
+  })
+
+  return {
+    comments: !__PROD__,
+    ignore: __TEST__ ? undefined : [/\.spec\.js$/],
+    plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
+    presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
+  }
+}
```
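Each package then consumes this shared factory from its own `.babelrc.js` — the pattern the later hunks in this compare apply to `@xen-orchestra/cron` and `@xen-orchestra/fs`:

```js
// .babelrc.js — minimal consumer sketch; the factory reads engines.node,
// dependencies and devDependencies from the given package.json to build
// the plugin/preset lists
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
```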
**@xen-orchestra/babel-config/package.json** (new file)

```diff
@@ -0,0 +1,11 @@
+{
+  "private": true,
+  "name": "@xen-orchestra/babel-config",
+  "version": "0.0.0",
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
+  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/vatesfr/xen-orchestra.git"
+  }
+}
```
```diff
@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-          let node = (pkg.engines || {}).node
-          if (node !== undefined) {
-            const trimChars = '^=>~'
-            while (trimChars.includes(node[0])) {
-              node = node.slice(1)
-            }
-            return { node: node }
-          }
-        })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/cron",
-  "version": "1.0.0",
+  "version": "1.0.3",
   "license": "ISC",
   "description": "Focused, well maintained, cron parser/scheduler",
   "keywords": [
@@ -14,7 +14,7 @@
     "scheduling",
     "task"
   ],
-  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "repository": {
     "type": "git",
@@ -38,13 +38,13 @@
   },
   "dependencies": {
     "lodash": "^4.17.4",
-    "luxon": "^0.5.2"
+    "moment-timezone": "^0.5.14"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
   },
```
```diff
@@ -1,4 +1,4 @@
-import { DateTime } from 'luxon'
+import moment from 'moment-timezone'

 import next from './next'
 import parse from './parse'
@@ -41,7 +41,10 @@ class Job {
 class Schedule {
   constructor (pattern, zone = 'utc') {
     this._schedule = parse(pattern)
-    this._dateTimeOpts = { zone }
+    this._createDate =
+      zone.toLowerCase() === 'utc'
+        ? moment.utc
+        : zone === 'local' ? moment : () => moment.tz(zone)
   }

   createJob (fn) {
@@ -51,15 +54,15 @@ class Schedule {
   next (n) {
     const dates = new Array(n)
     const schedule = this._schedule
-    let date = DateTime.fromObject(this._dateTimeOpts)
+    let date = this._createDate()
     for (let i = 0; i < n; ++i) {
-      dates[i] = (date = next(schedule, date)).toJSDate()
+      dates[i] = (date = next(schedule, date)).toDate()
     }
     return dates
   }

   _nextDelay () {
-    const now = DateTime.fromObject(this._dateTimeOpts)
+    const now = this._createDate()
     return next(this._schedule, now) - now
   }
```
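For orientation, a usage sketch inferred only from the API visible in this hunk (`parse` in the constructor, `next(n)` returning JS `Date`s, `createJob(fn)`); how the class is actually exported by the package is not shown here:

```js
// hypothetical consumption of the Schedule class above
const schedule = new Schedule('0 6 * * *', 'Europe/Paris') // daily at 06:00, Paris time

// next three occurrences as plain Date objects (via moment#toDate())
console.log(schedule.next(3))

// createJob wraps a callback; re-arming is driven by _nextDelay()
const job = schedule.createJob(() => console.log('tick'))
```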
```diff
@@ -1,10 +1,10 @@
+import moment from 'moment-timezone'
 import sortedIndex from 'lodash/sortedIndex'
-import { DateTime } from 'luxon'

 const NEXT_MAPPING = {
   month: { year: 1 },
-  day: { month: 1 },
-  weekday: { week: 1 },
+  date: { month: 1 },
+  day: { week: 1 },
   hour: { day: 1 },
   minute: { hour: 1 },
 }
@@ -13,38 +13,37 @@ const getFirst = values => (values !== undefined ? values[0] : 0)

 const setFirstAvailable = (date, unit, values) => {
   if (values === undefined) {
-    return date
+    return
   }

   const curr = date.get(unit)
   const next = values[sortedIndex(values, curr) % values.length]
   if (curr === next) {
-    return date
+    return
   }

-  const newDate = date.set({ [unit]: next })
-  return newDate > date ? newDate : newDate.plus(NEXT_MAPPING[unit])
+  const timestamp = +date
+  date.set(unit, next)
+  if (timestamp > +date) {
+    date.add(NEXT_MAPPING[unit])
+  }
+  return true
 }

 // returns the next run, after the passed date
 export default (schedule, fromDate) => {
-  let date = fromDate
+  let date = moment(fromDate)
     .set({
       second: 0,
       millisecond: 0,
     })
-    .plus({ minute: 1 })
+    .add({ minute: 1 })

   const { minute, hour, dayOfMonth, month, dayOfWeek } = schedule
-  date = setFirstAvailable(date, 'minute', minute)
+  setFirstAvailable(date, 'minute', minute)

-  let tmp
-
-  tmp = setFirstAvailable(date, 'hour', hour)
-  if (tmp !== date) {
-    date = tmp.set({
-      minute: getFirst(minute),
-    })
+  if (setFirstAvailable(date, 'hour', hour)) {
+    date.set('minute', getFirst(minute))
   }

   let loop
@@ -52,30 +51,30 @@ export default (schedule, fromDate) => {
   do {
     loop = false

-    tmp = setFirstAvailable(date, 'month', month)
-    if (tmp !== date) {
-      date = tmp.set({
-        day: 1,
+    if (setFirstAvailable(date, 'month', month)) {
+      date.set({
+        date: 1,
         hour: getFirst(hour),
         minute: getFirst(minute),
       })
     }

+    let newDate = date.clone()
     if (dayOfMonth === undefined) {
       if (dayOfWeek !== undefined) {
-        tmp = setFirstAvailable(date, 'weekday', dayOfWeek)
+        setFirstAvailable(newDate, 'day', dayOfWeek)
       }
     } else if (dayOfWeek === undefined) {
-      tmp = setFirstAvailable(date, 'day', dayOfMonth)
+      setFirstAvailable(newDate, 'date', dayOfMonth)
     } else {
-      tmp = DateTime.min(
-        setFirstAvailable(date, 'day', dayOfMonth),
-        setFirstAvailable(date, 'weekday', dayOfWeek)
-      )
+      const dateDay = newDate.clone()
+      setFirstAvailable(dateDay, 'date', dayOfMonth)
+      setFirstAvailable(newDate, 'day', dayOfWeek)
+      newDate = moment.min(dateDay, newDate)
     }
-    if (tmp !== date) {
-      loop = tmp.month !== date.month
-      date = tmp.set({
+    if (+date !== +newDate) {
+      loop = date.month() !== newDate.month()
+      date = newDate.set({
         hour: getFirst(hour),
         minute: getFirst(minute),
       })
```
```diff
@@ -1,17 +1,15 @@
 /* eslint-env jest */

 import mapValues from 'lodash/mapValues'
-import { DateTime } from 'luxon'
+import moment from 'moment-timezone'

 import next from './next'
 import parse from './parse'

-const N = (pattern, fromDate = '2018-04-09T06:25') =>
-  next(parse(pattern), DateTime.fromISO(fromDate, { zone: 'utc' })).toISO({
-    includeOffset: false,
-    suppressMilliseconds: true,
-    suppressSeconds: true,
-  })
+const N = (pattern, fromDate = '2018-04-09T06:25') => {
+  const iso = next(parse(pattern), moment.utc(fromDate)).toISOString()
+  return iso.slice(0, iso.lastIndexOf(':'))
+}

 describe('next()', () => {
   mapValues(
@@ -43,4 +41,8 @@ describe('next()', () => {
       'no solutions found for this schedule'
     )
   })
+
+  it('select the first sunday of the month', () => {
+    expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
+  })
 })
```
```diff
@@ -173,18 +173,13 @@ export default createParser({
     {
       aliases: 'jan feb mar apr may jun jul aug sep oct nov dec'.split(' '),
       name: 'month',
-      range: [1, 12],
-
-      // this function is applied to numeric entries (not steps)
-      //
-      // currently parse month 0-11
-      post: value => value + 1,
+      range: [0, 11],
     },
     {
-      aliases: 'mon tue wen thu fri sat sun'.split(' '),
+      aliases: 'sun mon tue wen thu fri sat'.split(' '),
       name: 'dayOfWeek',
-      post: value => (value === 0 ? 7 : value),
-      range: [1, 7],
+      post: value => (value === 7 ? 0 : value),
+      range: [0, 6],
     },
   ],
   presets: {
```
```diff
@@ -8,22 +8,22 @@ describe('parse()', () => {
       minute: [0],
       hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
       dayOfMonth: [1, 11, 21, 31],
-      month: [1, 3, 5, 8, 11],
+      month: [0, 2, 4, 7, 10],
     })
   })

   it('correctly parse months', () => {
     expect(parse('* * * 0,11 *')).toEqual({
-      month: [1, 12],
+      month: [0, 11],
     })
     expect(parse('* * * jan,dec *')).toEqual({
-      month: [1, 12],
+      month: [0, 11],
     })
   })

   it('correctly parse days', () => {
     expect(parse('* * * * mon,sun')).toEqual({
-      dayOfWeek: [1, 7],
+      dayOfWeek: [0, 1],
     })
   })

@@ -40,10 +40,10 @@ describe('parse()', () => {
   it('dayOfWeek: 0 and 7 bind to sunday', () => {
     expect(parse('* * * * 0')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
     })
     expect(parse('* * * * 7')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
     })
   })
 })
```
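The direction of these changes is worth noting: parsed months and weekdays now use moment's 0-based conventions (months 0–11, weekdays 0–6 with Sunday = 0), so values from `parse()` can be fed to moment setters without the old `post` conversion step:

```js
import moment from 'moment-timezone'

// moment's conventions, matching the new parse() output above
const d = moment.utc('2018-04-01T00:00') // first Sunday of April 2018
console.log(d.month()) // 3 -> April (0-based)
console.log(d.day()) // 0 -> Sunday
```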
**@xen-orchestra/fs/.babelrc.js** (new file)

```diff
@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
**@xen-orchestra/fs/package.json** (new file)

```diff
@@ -0,0 +1,54 @@
+{
+  "name": "@xen-orchestra/fs",
+  "version": "0.0.0",
+  "license": "AGPL-3.0",
+  "description": "The File System for Xen Orchestra backups.",
+  "keywords": [],
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
+  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/vatesfr/xen-orchestra.git"
+  },
+  "preferGlobal": true,
+  "main": "dist/",
+  "bin": {},
+  "files": [
+    "dist/"
+  ],
+  "engines": {
+    "node": ">=6"
+  },
+  "dependencies": {
+    "@babel/runtime": "^7.0.0-beta.44",
+    "@marsaud/smb2-promise": "^0.2.1",
+    "execa": "^0.10.0",
+    "fs-extra": "^5.0.0",
+    "get-stream": "^3.0.0",
+    "lodash": "^4.17.4",
+    "promise-toolbox": "^0.9.5",
+    "through2": "^2.0.3",
+    "tmp": "^0.0.33",
+    "xo-remote-parser": "^0.3"
+  },
+  "devDependencies": {
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
+    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
+    "babel-plugin-lodash": "^3.3.2",
+    "cross-env": "^5.1.3",
+    "index-modules": "^0.3.0",
+    "rimraf": "^2.6.2"
+  },
+  "scripts": {
+    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
+    "clean": "rimraf dist/",
+    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
+    "prebuild": "yarn run clean",
+    "predev": "yarn run clean",
+    "prepare": "yarn run build"
+  }
+}
```
**@xen-orchestra/fs/src/abstract.js** (new file)

```diff
@@ -0,0 +1,288 @@
+// @flow
+
+import getStream from 'get-stream'
+import { randomBytes } from 'crypto'
+import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
+import { type Readable, type Writable } from 'stream'
+import { parse } from 'xo-remote-parser'
+
+import { createChecksumStream, validChecksumOfReadStream } from './checksum'
+
+type Data = Buffer | Readable | string
+type FileDescriptor = {| fd: mixed, path: string |}
+type LaxReadable = Readable & Object
+type LaxWritable = Writable & Object
+
+type File = FileDescriptor | string
+
+const checksumFile = file => file + '.checksum'
+
+export default class RemoteHandlerAbstract {
+  _remote: Object
+  constructor (remote: any) {
+    this._remote = { ...remote, ...parse(remote.url) }
+    if (this._remote.type !== this.type) {
+      throw new Error('Incorrect remote type')
+    }
+  }
+
+  get type (): string {
+    throw new Error('Not implemented')
+  }
+
+  /**
+   * Asks the handler to sync the state of the effective remote with its' metadata
+   */
+  async sync (): Promise<mixed> {
+    return this._sync()
+  }
+
+  async _sync (): Promise<mixed> {
+    throw new Error('Not implemented')
+  }
+
+  /**
+   * Free the resources possibly dedicated to put the remote at work, when it is no more needed
+   */
+  async forget (): Promise<void> {
+    await this._forget()
+  }
+
+  async _forget (): Promise<void> {
+    throw new Error('Not implemented')
+  }
+
+  async test (): Promise<Object> {
+    const testFileName = `${Date.now()}.test`
+    const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
+    let step = 'write'
+    try {
+      await this.outputFile(testFileName, data)
+      step = 'read'
+      const read = await this.readFile(testFileName)
+      if (data.compare(read) !== 0) {
+        throw new Error('output and input did not match')
+      }
+      return {
+        success: true,
+      }
+    } catch (error) {
+      return {
+        success: false,
+        step,
+        file: testFileName,
+        error: error.message || String(error),
+      }
+    } finally {
+      ignoreErrors.call(this.unlink(testFileName))
+    }
+  }
+
+  async outputFile (file: string, data: Data, options?: Object): Promise<void> {
+    return this._outputFile(file, data, {
+      flags: 'wx',
+      ...options,
+    })
+  }
+
+  async _outputFile (file: string, data: Data, options?: Object): Promise<void> {
+    const stream = await this.createOutputStream(file, options)
+    const promise = fromEvent(stream, 'finish')
+    stream.end(data)
+    await promise
+  }
+
+  async readFile (file: string, options?: Object): Promise<Buffer> {
+    return this._readFile(file, options)
+  }
+
+  _readFile (file: string, options?: Object): Promise<Buffer> {
+    return this.createReadStream(file, options).then(getStream.buffer)
+  }
+
+  async rename (
+    oldPath: string,
+    newPath: string,
+    { checksum = false }: Object = {}
+  ) {
+    let p = this._rename(oldPath, newPath)
+    if (checksum) {
+      p = Promise.all([
+        p,
+        this._rename(checksumFile(oldPath), checksumFile(newPath)),
+      ])
+    }
+    return p
+  }
+
+  async _rename (oldPath: string, newPath: string) {
+    throw new Error('Not implemented')
+  }
+
+  async list (
+    dir: string = '.',
+    {
+      filter,
+      prependDir = false,
+    }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
+  ): Promise<string[]> {
+    const entries = await this._list(dir)
+
+    if (prependDir) {
+      entries.forEach((entry, i) => {
+        entries[i] = dir + '/' + entry
+      })
+    }
+
+    return filter === undefined ? entries : entries.filter(filter)
+  }
+
+  async _list (dir: string): Promise<string[]> {
+    throw new Error('Not implemented')
+  }
+
+  createReadStream (
+    file: string,
+    { checksum = false, ignoreMissingChecksum = false, ...options }: Object = {}
+  ): Promise<LaxReadable> {
+    const path = typeof file === 'string' ? file : file.path
+    const streamP = this._createReadStream(file, options).then(stream => {
+      // detect early errors
+      let promise = fromEvent(stream, 'readable')
+
+      // try to add the length prop if missing and not a range stream
+      if (
+        stream.length === undefined &&
+        options.end === undefined &&
+        options.start === undefined
+      ) {
+        promise = Promise.all([
+          promise,
+          ignoreErrors.call(
+            this.getSize(file).then(size => {
+              stream.length = size
+            })
+          ),
+        ])
+      }
+
+      return promise.then(() => stream)
+    })
+
+    if (!checksum) {
+      return streamP
+    }
+
+    // avoid a unhandled rejection warning
+    ignoreErrors.call(streamP)
+
+    return this.readFile(checksumFile(path)).then(
+      checksum =>
+        streamP.then(stream => {
+          const { length } = stream
+          stream = (validChecksumOfReadStream(
+            stream,
+            String(checksum).trim()
+          ): LaxReadable)
+          stream.length = length
+
+          return stream
+        }),
+      error => {
+        if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
+          return streamP
+        }
+        throw error
+      }
+    )
+  }
+
+  async _createReadStream (
+    file: string,
+    options?: Object
+  ): Promise<LaxReadable> {
+    throw new Error('Not implemented')
+  }
+
+  async openFile (path: string, flags?: string): Promise<FileDescriptor> {
+    return { fd: await this._openFile(path, flags), path }
+  }
+
+  async _openFile (path: string, flags?: string): Promise<mixed> {
+    throw new Error('Not implemented')
+  }
+
+  async closeFile (fd: FileDescriptor): Promise<void> {
+    await this._closeFile(fd.fd)
+  }
+
+  async _closeFile (fd: mixed): Promise<void> {
+    throw new Error('Not implemented')
+  }
+
+  async refreshChecksum (path: string): Promise<void> {
+    const stream = (await this.createReadStream(path)).pipe(
+      createChecksumStream()
+    )
+    stream.resume() // start reading the whole file
+    await this.outputFile(checksumFile(path), await stream.checksum)
+  }
+
+  async createOutputStream (
+    file: File,
+    { checksum = false, ...options }: Object = {}
+  ): Promise<LaxWritable> {
+    const path = typeof file === 'string' ? file : file.path
+    const streamP = this._createOutputStream(file, {
+      flags: 'wx',
+      ...options,
+    })
+
+    if (!checksum) {
+      return streamP
+    }
+
+    const checksumStream = createChecksumStream()
+    const forwardError = error => {
+      checksumStream.emit('error', error)
+    }
+
+    const stream = await streamP
+    stream.on('error', forwardError)
+    checksumStream.pipe(stream)
+
+    // $FlowFixMe
+    checksumStream.checksumWritten = checksumStream.checksum
+      .then(value => this.outputFile(checksumFile(path), value))
+      .catch(forwardError)
+
+    return checksumStream
+  }
+
+  async _createOutputStream (
+    file: mixed,
+    options?: Object
+  ): Promise<LaxWritable> {
+    throw new Error('Not implemented')
+  }
+
+  async unlink (file: string, { checksum = true }: Object = {}): Promise<void> {
+    if (checksum) {
+      ignoreErrors.call(this._unlink(checksumFile(file)))
+    }
+
+    await this._unlink(file)
+  }
+
+  async _unlink (file: mixed): Promise<void> {
+    throw new Error('Not implemented')
+  }
+
+  async getSize (file: mixed): Promise<number> {
+    return this._getSize(file)
+  }
+
+  async _getSize (file: mixed): Promise<number> {
+    throw new Error('Not implemented')
+  }
+}
```
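`RemoteHandlerAbstract` is a template-method base class: the public methods (`test`, `list`, `createReadStream`, `createOutputStream`, …) layer shared behavior such as checksum handling on top of `_`-prefixed hooks that each backend overrides. A hypothetical minimal subclass, purely to illustrate which hooks a backend provides (the real implementations follow below in this compare):

```js
import RemoteHandlerAbstract from './abstract'

// illustrative only — an in-memory "remote" with invented internals
export default class MemoryHandler extends RemoteHandlerAbstract {
  _files = {} // fake storage: path -> Buffer

  get type () {
    return 'memory' // must match the type parsed from remote.url
  }

  async _sync () {} // nothing to mount
  async _forget () {} // nothing to release

  async _list (dir) {
    return Object.keys(this._files)
  }

  async _getSize (file) {
    return this._files[file].length
  }

  async _unlink (file) {
    delete this._files[file]
  }

  async _rename (oldPath, newPath) {
    this._files[newPath] = this._files[oldPath]
    delete this._files[oldPath]
  }
}
```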
**@xen-orchestra/fs/src/checksum.js** (new file)

```diff
@@ -0,0 +1,100 @@
+// @flow
+
+// $FlowFixMe
+import through2 from 'through2'
+import { createHash } from 'crypto'
+import { defer, fromEvent } from 'promise-toolbox'
+import { invert } from 'lodash'
+import { type Readable, type Transform } from 'stream'
+
+// Format: $<algorithm>$<salt>$<encrypted>
+//
+// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
+const ALGORITHM_TO_ID = {
+  md5: '1',
+  sha256: '5',
+  sha512: '6',
+}
+
+const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)
+
+// Create a through stream which computes the checksum of all data going
+// through.
+//
+// The `checksum` attribute is a promise which resolves at the end of the stream
+// with a string representation of the checksum.
+//
+//    const source = ...
+//    const checksumStream = source.pipe(createChecksumStream())
+//    checksumStream.resume() // make the data flow without an output
+//    console.log(await checksumStream.checksum)
+export const createChecksumStream = (
+  algorithm: string = 'md5'
+): Transform & { checksum: Promise<string> } => {
+  const algorithmId = ALGORITHM_TO_ID[algorithm]
+
+  if (!algorithmId) {
+    throw new Error(`unknown algorithm: ${algorithm}`)
+  }
+
+  const hash = createHash(algorithm)
+  const { promise, resolve, reject } = defer()
+
+  const stream = through2(
+    (chunk, enc, callback) => {
+      hash.update(chunk)
+      callback(null, chunk)
+    },
+    callback => {
+      resolve(`$${algorithmId}$$${hash.digest('hex')}`)
+      callback()
+    }
+  ).once('error', reject)
+  stream.checksum = promise
+  return stream
+}
+
+// Check if the checksum of a readable stream is equals to an expected checksum.
+// The given stream is wrapped in a stream which emits an error event
+// if the computed checksum is not equals to the expected checksum.
+export const validChecksumOfReadStream = (
+  stream: Readable,
+  expectedChecksum: string
+): Readable & { checksumVerified: Promise<void> } => {
+  const algorithmId = expectedChecksum.slice(
+    1,
+    expectedChecksum.indexOf('$', 1)
+  )
+
+  if (!algorithmId) {
+    throw new Error(`unknown algorithm: ${algorithmId}`)
+  }
+
+  const hash = createHash(ID_TO_ALGORITHM[algorithmId])
+
+  const wrapper: any = stream.pipe(
+    through2(
+      { highWaterMark: 0 },
+      (chunk, enc, callback) => {
+        hash.update(chunk)
+        callback(null, chunk)
+      },
+      callback => {
+        const checksum = `$${algorithmId}$$${hash.digest('hex')}`
+
+        callback(
+          checksum !== expectedChecksum
+            ? new Error(
+                `Bad checksum (${checksum}), expected: ${expectedChecksum}`
+              )
+            : null
+        )
+      }
+    )
+  )
+
+  stream.on('error', error => wrapper.emit('error', error))
+  wrapper.checksumVerified = fromEvent(wrapper, 'end')
+
+  return wrapper
+}
```
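The checksum string reuses crypt(3)'s `$<id>$<salt>$<hash>` layout with an empty salt. A usage sketch combining the two helpers (the file names are hypothetical):

```js
import { createReadStream, createWriteStream } from 'fs'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'

async function copyWithChecksum () {
  // compute a digest of everything flowing through the pipe
  const checksumStream = createReadStream('in.bin').pipe(
    createChecksumStream('sha256')
  )
  checksumStream.pipe(createWriteStream('out.bin'))
  return checksumStream.checksum // e.g. '$5$$<hex digest>'
}

async function verifiedRead (expectedChecksum) {
  // the wrapper emits 'error' if the computed digest does not match
  const stream = validChecksumOfReadStream(
    createReadStream('in.bin'),
    expectedChecksum
  )
  stream.resume() // drain the data
  await stream.checksumVerified
}
```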
**@xen-orchestra/fs/src/fs.integ.spec.js** (new file)

```diff
@@ -0,0 +1,26 @@
+/* eslint-env jest */
+
+import rimraf from 'rimraf'
+import tmp from 'tmp'
+
+import { fromCallback as pFromCallback } from 'promise-toolbox'
+import { getHandler } from '.'
+
+const initialDir = process.cwd()
+
+beforeEach(async () => {
+  const dir = await pFromCallback(cb => tmp.dir(cb))
+  process.chdir(dir)
+})
+
+afterEach(async () => {
+  const tmpDir = process.cwd()
+  process.chdir(initialDir)
+  await pFromCallback(cb => rimraf(tmpDir, cb))
+})
+
+test("fs test doesn't crash", async () => {
+  const handler = getHandler({ url: 'file://' + process.cwd() })
+  const result = await handler.test()
+  expect(result.success).toBeTruthy()
+})
```
**@xen-orchestra/fs/src/index.js** (new file)

```diff
@@ -0,0 +1,26 @@
+// @flow
+
+import type RemoteHandler from './abstract'
+import RemoteHandlerLocal from './local'
+import RemoteHandlerNfs from './nfs'
+import RemoteHandlerSmb from './smb'
+
+export type { default as RemoteHandler } from './abstract'
+export type Remote = { url: string }
+
+const HANDLERS = {
+  file: RemoteHandlerLocal,
+  smb: RemoteHandlerSmb,
+  nfs: RemoteHandlerNfs,
+}
+
+export const getHandler = (remote: Remote): RemoteHandler => {
+  // FIXME: should be done in xo-remote-parser.
+  const type = remote.url.split('://')[0]
+
+  const Handler = HANDLERS[type]
+  if (!Handler) {
+    throw new Error('Unhandled remote type')
+  }
+  return new Handler(remote)
+}
```
```diff
@@ -63,13 +63,29 @@ export default class LocalHandler extends RemoteHandlerAbstract {
   }

   async _createReadStream (file, options) {
-    return fs.createReadStream(this._getFilePath(file), options)
+    if (typeof file === 'string') {
+      return fs.createReadStream(this._getFilePath(file), options)
+    } else {
+      return fs.createReadStream('', {
+        autoClose: false,
+        ...options,
+        fd: file.fd,
+      })
+    }
   }

   async _createOutputStream (file, options) {
-    const path = this._getFilePath(file)
-    await fs.ensureDir(dirname(path))
-    return fs.createWriteStream(path, options)
+    if (typeof file === 'string') {
+      const path = this._getFilePath(file)
+      await fs.ensureDir(dirname(path))
+      return fs.createWriteStream(path, options)
+    } else {
+      return fs.createWriteStream('', {
+        autoClose: false,
+        ...options,
+        fd: file.fd,
+      })
+    }
   }

   async _unlink (file) {
@@ -82,7 +98,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
   }

   async _getSize (file) {
-    const stats = await fs.stat(this._getFilePath(file))
+    const stats = await fs.stat(
+      this._getFilePath(typeof file === 'string' ? file : file.path)
+    )
     return stats.size
   }

+  async _openFile (path, flags) {
+    return fs.open(this._getFilePath(path), flags)
+  }
+
+  async _closeFile (fd) {
+    return fs.close(fd)
+  }
 }
```
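The point of these hunks is that handlers now accept an already-open `FileDescriptor` (from `openFile`) as well as a path; Node's `fs.createReadStream`/`fs.createWriteStream` take an `fd` option, and `autoClose: false` keeps the descriptor usable across several streams. A sketch of the intended call pattern through the public API:

```js
// hypothetical usage — open once, stream a byte range through the same
// descriptor, then close it explicitly
async function readFooter (handler, path, size) {
  const fd = await handler.openFile(path, 'r')
  try {
    const stream = await handler.createReadStream(fd, {
      start: size - 512, // e.g. a VHD footer
      end: size - 1,
    })
    // ... consume the stream ...
  } finally {
    await handler.closeFile(fd)
  }
}
```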
```diff
@@ -1,7 +1,9 @@
 import Smb2 from '@marsaud/smb2-promise'
+import { lastly as pFinally } from 'promise-toolbox'

 import RemoteHandlerAbstract from './abstract'
-import { noop, pFinally } from '../utils'
+
+const noop = () => {}

 // Normalize the error code for file not found.
 const normalizeError = error => {
@@ -139,6 +141,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
   }

   async _createReadStream (file, options = {}) {
+    if (typeof file !== 'string') {
+      file = file.path
+    }
     const client = this._getClient(this._remote)
     let stream

@@ -154,6 +159,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
   }

   async _createOutputStream (file, options = {}) {
+    if (typeof file !== 'string') {
+      file = file.path
+    }
     const client = this._getClient(this._remote)
     const path = this._getFilePath(file)
     const dir = this._dirname(path)
@@ -188,13 +196,22 @@ export default class SmbHandler extends RemoteHandlerAbstract {
     let size

     try {
-      size = await client.getSize(this._getFilePath(file))::pFinally(() => {
-        client.close()
-      })
+      size = await client
+        .getSize(this._getFilePath(typeof file === 'string' ? file : file.path))
+        ::pFinally(() => {
+          client.close()
+        })
     } catch (error) {
       throw normalizeError(error)
     }

     return size
   }

+  // this is a fake
+  async _openFile (path) {
+    return this._getFilePath(path)
+  }
+
+  async _closeFile (fd) {}
 }
```
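`::pFinally(…)` uses Babel's function-bind operator proposal (enabled via `@babel/plugin-proposal-function-bind` in the package.json above); promise-toolbox methods take the promise as `this`, so the same cleanup can be written without the operator:

```js
import { lastly as pFinally } from 'promise-toolbox'

// equivalent to `client.getSize(path)::pFinally(cb)` without the
// function-bind operator
async function getSizeAndClose (client, path) {
  return pFinally.call(client.getSize(path), () => {
    client.close()
  })
}
```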
@@ -1,121 +1,286 @@
|
||||
# ChangeLog
|
||||
|
||||
## **5.20.0** (planned 2018-05-31)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
|
||||
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
|
||||
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
|
||||
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
|
||||
|
||||
### Bugs
|
||||
|
||||
## **5.19.0** (2018-05-01)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
|
||||
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
|
||||
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
|
||||
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
|
||||
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
|
||||
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
|
||||
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
|
||||
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
|
||||
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
|
||||
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
|
||||
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
|
||||
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
|
||||
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
|
||||
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
|
||||
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
|
||||
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
|
||||
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
|
||||
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
|
||||
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
|
||||
|
||||
### Bugs
|
||||
|
||||
- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
|
||||
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
|
||||
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
|
||||
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
|
||||
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
|
||||
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
|
||||
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
|
||||
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
|
||||
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
|
||||
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
|
||||
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
|
||||
|
||||
## **5.18.0** (2018-03-31)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
|
||||
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
|
||||
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
|
||||
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
|
||||
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
|
||||
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
|
||||
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
|
||||
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
|
||||
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
|
||||
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
|
||||
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
|
||||
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
|
||||
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
|
||||
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
|
||||
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
|
||||
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
|
||||
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
|
||||
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
|
||||
- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
|
||||
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
|
||||
|
||||
|
||||
### Bugs
|
||||
|
||||
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
|
||||
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
|
||||
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
|
||||
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
|
||||
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
|
||||
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
|
||||
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
|
||||
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
|
||||
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
|
||||
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
|
||||
- [Basic backup] Continous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
|
||||
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
|
||||
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
|
||||
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
|
||||
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
|
||||
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
|
||||
|
||||
## **5.17.0** (2018-03-02)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
|
||||
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
|
||||
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
|
||||
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
|
||||
- Basic backup: snapshots names [#2668](https://github.com/vatesfr/xen-orchestra/issues/2668)
|
||||
- Change placement of "share" button for self [#2663](https://github.com/vatesfr/xen-orchestra/issues/2663)
|
||||
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
|
||||
- Backup report for VDI chain status [#2639](https://github.com/vatesfr/xen-orchestra/issues/2639)
|
||||
- [Dashboard/Health] Control domain VDIs should includes snapshots [#2634](https://github.com/vatesfr/xen-orchestra/issues/2634)
|
||||
- Do not count VM-snapshot in self quota [#2626](https://github.com/vatesfr/xen-orchestra/issues/2626)
|
||||
- [xo-web] Backup logs [#2618](https://github.com/vatesfr/xen-orchestra/issues/2618)
|
||||
- [VM/Snapshots] grouped deletion [#2595](https://github.com/vatesfr/xen-orchestra/issues/2595)
|
||||
- [Backups] add a new state for a VM: skipped [#2591](https://github.com/vatesfr/xen-orchestra/issues/2591)
|
||||
- Set a self-service VM at "share" after creation [#2589](https://github.com/vatesfr/xen-orchestra/issues/2589)
|
||||
- [Backup logs] Improve Unhealthy VDI Chain message [#2586](https://github.com/vatesfr/xen-orchestra/issues/2586)
|
||||
- [SortedTable] Put sort criteria in URL like the filter [#2584](https://github.com/vatesfr/xen-orchestra/issues/2584)
|
||||
- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
|
||||
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
|
||||
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
|
||||
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
|
||||
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
|
||||
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
|
||||
|
||||
### Bugs
|
||||
|
||||
- Limit VDI export concurrency [#2672](https://github.com/vatesfr/xen-orchestra/issues/2672)
|
||||
- Select is broken outside dev mode [#2645](https://github.com/vatesfr/xen-orchestra/issues/2645)
|
||||
- "New" XOSAN automatically register the user [#2625](https://github.com/vatesfr/xen-orchestra/issues/2625)
|
||||
- [VM/Advanced] Error on resource set change should not be hidden [#2620](https://github.com/vatesfr/xen-orchestra/issues/2620)
|
||||
- misspelled word [#2606](https://github.com/vatesfr/xen-orchestra/issues/2606)
|
||||
- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
|
||||
|
||||
## **5.16.0** (2018-01-31)
|
||||
|
||||
### Enhancements
|
||||
|
||||
- Use @xen-orchestra/cron everywhere [#2616](https://github.com/vatesfr/xen-orchestra/issues/2616)
|
||||
- [SortedTable] Possibility to specify grouped/individual actions together [#2596](https://github.com/vatesfr/xen-orchestra/issues/2596)
|
||||
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
|
||||
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
|
||||
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
|
||||
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
|
||||
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
|
||||
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
|
||||
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
|
||||
- Two factor auth [#1897](https://github.com/vatesfr/xen-orchestra/issues/1897)
|
||||
- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
|
||||
- Self Service User - User don't have quota in his dashboard [#1538](https://github.com/vatesfr/xen-orchestra/issues/1538)
|
||||
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
|
||||
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
|
||||
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
|
||||
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
|
||||
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
|
||||
|
||||
### Bugs
|
||||
|
||||
- [cron] toJSDate is not a function [#2661](https://github.com/vatesfr/xen-orchestra/issues/2661)
|
||||
- [Delta backup] Merge should not fail when delta contains no data [#2635](https://github.com/vatesfr/xen-orchestra/issues/2635)
|
||||
- Select issues [#2590](https://github.com/vatesfr/xen-orchestra/issues/2590)
|
||||
- Fix selects display [#2575](https://github.com/vatesfr/xen-orchestra/issues/2575)
|
||||
- [SortedTable] Stuck when displaying last page [#2569](https://github.com/vatesfr/xen-orchestra/issues/2569)
|
||||
- [vm/network] Duplicate key error [#2553](https://github.com/vatesfr/xen-orchestra/issues/2553)
|
||||
- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
|
||||
- TZ selector is not used for backup schedule preview [#2464](https://github.com/vatesfr/xen-orchestra/issues/2464)
|
||||
- Remove filter in VM/network view [#2548](https://github.com/vatesfr/xen-orchestra/issues/2548)
|
||||
|
||||
|
||||
## **5.15.0** (2017-12-29)
|
||||
|
||||
### Enhancements
|
||||
|
||||
* VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
|
||||
* Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
|
||||
* New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
|
||||
* Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
|
||||
* [Self Service] Do not use placehodlers to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
|
||||
* Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
|
||||
* Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
|
||||
* Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
|
||||
* Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
|
||||
* [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
|
||||
* Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
|
||||
- VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
|
||||
- Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
|
||||
- New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
|
||||
- Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
|
||||
- [Self Service] Do not use placehodlers to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
|
||||
- Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
|
||||
- Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
|
||||
- Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
|
||||
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
|
||||
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
|
||||
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
|
||||
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
|
||||
|
||||
### Bugs

- VM console doesn't work when using IPv6 in the URL [#2530](https://github.com/vatesfr/xen-orchestra/issues/2530)
- Retention issue with failed basic backup [#2524](https://github.com/vatesfr/xen-orchestra/issues/2524)
- [VM/Advanced] Check that the auto power on setting is working [#2489](https://github.com/vatesfr/xen-orchestra/issues/2489)
- Cloud config drive creation fails on XenServer < 7 [#2478](https://github.com/vatesfr/xen-orchestra/issues/2478)
- VM create fails due to missing vGPU id [#2466](https://github.com/vatesfr/xen-orchestra/issues/2466)

## **5.14.0** (2017-10-31)

### Enhancements

- VM snapshot description display [#2458](https://github.com/vatesfr/xen-orchestra/issues/2458)
- [Home] Ability to sort VMs by number of snapshots [#2450](https://github.com/vatesfr/xen-orchestra/issues/2450)
- Display XS version in host view [#2439](https://github.com/vatesfr/xen-orchestra/issues/2439)
- [File restore] Clarify the possibility to select multiple files [#2438](https://github.com/vatesfr/xen-orchestra/issues/2438)
- [Continuous Replication] Time in replicated VMs [#2431](https://github.com/vatesfr/xen-orchestra/issues/2431)
- [SortedTable] Active page in URL param [#2405](https://github.com/vatesfr/xen-orchestra/issues/2405)
- Replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
- [SortedTable] Be explicit when there are no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
- Handle patching licenses [#2382](https://github.com/vatesfr/xen-orchestra/issues/2382)
- Credential leaking in logs for messages regarding invalid credentials and "too fast authentication" [#2363](https://github.com/vatesfr/xen-orchestra/issues/2363)
- [SortedTable] Keyboard support [#2330](https://github.com/vatesfr/xen-orchestra/issues/2330)
- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
- On updater error, display a link to the documentation [#1610](https://github.com/vatesfr/xen-orchestra/issues/1610)
- Add basic vGPU support [#2413](https://github.com/vatesfr/xen-orchestra/issues/2413)
- Storage view - Disks tab - real disk usage [#2475](https://github.com/vatesfr/xen-orchestra/issues/2475)

### Bugs

- Config drive - Custom config not working properly [#2449](https://github.com/vatesfr/xen-orchestra/issues/2449)
- Snapshot sorted table breaks copyVm [#2446](https://github.com/vatesfr/xen-orchestra/issues/2446)
- [vm/snapshots] Incorrect default sort order [#2442](https://github.com/vatesfr/xen-orchestra/issues/2442)
- [Backups/Jobs] Incorrect months mapping [#2427](https://github.com/vatesfr/xen-orchestra/issues/2427)
- [Xapi#barrier()] Not compatible with XenServer < 6.1 [#2418](https://github.com/vatesfr/xen-orchestra/issues/2418)
- [SortedTable] Change page when no more items on the page [#2401](https://github.com/vatesfr/xen-orchestra/issues/2401)
- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
- Unable to edit / save restored backup job [#1922](https://github.com/vatesfr/xen-orchestra/issues/1922)

## **5.13.0** (2017-09-29)

### Enhancements

- Replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
- [SortedTable] Be explicit when there are no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
- Auto-select iqn or lun if there is only one [#2379](https://github.com/vatesfr/xen-orchestra/issues/2379)
- [Sparklines] Hide points [#2370](https://github.com/vatesfr/xen-orchestra/issues/2370)
- Allow xo-server-recover-account to generate a random password [#2360](https://github.com/vatesfr/xen-orchestra/issues/2360)
- Add disk in existing VM as self user [#2348](https://github.com/vatesfr/xen-orchestra/issues/2348)
- Sorted table for Settings/server [#2340](https://github.com/vatesfr/xen-orchestra/issues/2340)
- Sign in should be case-insensitive [#2337](https://github.com/vatesfr/xen-orchestra/issues/2337)
- [SortedTable] Extend checkbox click to whole column [#2329](https://github.com/vatesfr/xen-orchestra/issues/2329)
- [SortedTable] Ability to select all items (across pages) [#2324](https://github.com/vatesfr/xen-orchestra/issues/2324)
- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
- Warning on SMB remote creation [#2316](https://github.com/vatesfr/xen-orchestra/issues/2316)
- [Home | SortedTable] Add link to syntax doc in the filter input [#2305](https://github.com/vatesfr/xen-orchestra/issues/2305)
- [SortedTable] Add optional binding of filter to a URL query [#2301](https://github.com/vatesfr/xen-orchestra/issues/2301)
- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
- SR view / Disks: option to display non-managed VDIs [#1724](https://github.com/vatesfr/xen-orchestra/issues/1724)
- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)

### Bugs

- iSCSI issue on LUN selector [#2374](https://github.com/vatesfr/xen-orchestra/issues/2374)
- Errors in VM copy are not properly reported [#2347](https://github.com/vatesfr/xen-orchestra/issues/2347)
- Removing a PIF IP fails [#2346](https://github.com/vatesfr/xen-orchestra/issues/2346)
- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
- iSCSI LUN detection fails with authentication [#2339](https://github.com/vatesfr/xen-orchestra/issues/2339)
- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
- A job shouldn't be executable more than once at the same time [#2053](https://github.com/vatesfr/xen-orchestra/issues/2053)

## **5.12.0** (2017-08-31)

### Enhancements

- PIF selector with physical status [#2326](https://github.com/vatesfr/xen-orchestra/issues/2326)
- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
- Self service filter for home/VM view [#2303](https://github.com/vatesfr/xen-orchestra/issues/2303)
- SR/Disks: display total of VDIs to coalesce [#2300](https://github.com/vatesfr/xen-orchestra/issues/2300)
- Pool filter in the task view [#2293](https://github.com/vatesfr/xen-orchestra/issues/2293)
- "Loading" while fetching objects [#2285](https://github.com/vatesfr/xen-orchestra/issues/2285)
- [SortedTable] Add grouped actions feature [#2276](https://github.com/vatesfr/xen-orchestra/issues/2276)
- Add a filter to the backups' log [#2246](https://github.com/vatesfr/xen-orchestra/issues/2246)
- It should not be possible to migrate a halted VM [#2233](https://github.com/vatesfr/xen-orchestra/issues/2233)
- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
- Allow setting the pool master [#2213](https://github.com/vatesfr/xen-orchestra/issues/2213)
- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)

### Bugs

- Home pagination bug [#2310](https://github.com/vatesfr/xen-orchestra/issues/2310)
- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
- VM snapshots are not correctly deleted [#2304](https://github.com/vatesfr/xen-orchestra/issues/2304)
- Parallel deletion of VMs fails [#2297](https://github.com/vatesfr/xen-orchestra/issues/2297)
- Continuous replication creates multiple zombie disks [#2292](https://github.com/vatesfr/xen-orchestra/issues/2292)
- Add user to Group issue [#2196](https://github.com/vatesfr/xen-orchestra/issues/2196)
- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)

## **5.11.0** (2017-07-31)

46
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at julien.fontanet@vates.fr. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

24
flow-typed/lodash.js
vendored
Normal file
@@ -0,0 +1,24 @@
declare module 'lodash' {
  declare export function forEach<K, V>(
    object: { [K]: V },
    iteratee: (V, K) => void
  ): void
  declare export function groupBy<K, V>(
    object: { [K]: V },
    iteratee: K | ((V, K) => string)
  ): { [string]: V[] }
  declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
  declare export function isEmpty(mixed): boolean
  // was declared as returning boolean, which is wrong: keyBy maps keys to items
  declare export function keyBy<T>(array: T[], iteratee: string): { [string]: T }
  declare export function last<T>(array?: T[]): T | void
  declare export function map<T1, T2>(
    collection: T1[],
    iteratee: (T1) => T2
  ): T2[]
  declare export function mapValues<K, V1, V2>(
    object: { [K]: V1 },
    iteratee: (V1, K) => V2
  ): { [K]: V2 }
  declare export function noop(...args: mixed[]): void
  declare export function values<K, V>(object: { [K]: V }): V[]
}

14
flow-typed/promise-toolbox.js
vendored
Normal file
@@ -0,0 +1,14 @@
declare module 'promise-toolbox' {
  declare export function cancelable(Function): Function
  declare export function defer<T>(): {|
    promise: Promise<T>,
    reject: T => void,
    resolve: T => void,
  |}
  declare export function fromCallback<T>(
    (cb: (error: any, value: T) => void) => void
  ): Promise<T>
  declare export function fromEvent(emitter: mixed, string): Promise<mixed>
  declare export function ignoreErrors(): Promise<void>
  declare export function timeout<T>(delay: number): Promise<T>
}
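These declarations cover only the handful of promise-toolbox helpers this code base uses, not the library's full surface. A minimal sketch of `defer()` as declared here (the timeout and values are illustrative):

```js
const { promise, resolve } = defer()

// settle the deferred from anywhere, e.g. an event handler
setTimeout(() => resolve('done'), 100)

promise.then(value => console.log(value)) // logs 'done' after ~100 ms
```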

2
flow-typed/xo.js
vendored
Normal file
@@ -0,0 +1,2 @@
// eslint-disable-next-line no-undef
declare type $Dict<T, K = string> = { [K]: T }

16
package.json
@@ -1,19 +1,19 @@
{
  "devDependencies": {
    "@babel/register": "^7.0.0-beta.40",
    "@babel/register": "^7.0.0-beta.44",
    "babel-7-jest": "^21.3.2",
    "babel-eslint": "^8.1.2",
    "benchmark": "^2.1.4",
    "eslint": "^4.14.0",
    "eslint-config-standard": "^11.0.0-beta.0",
    "eslint-config-standard-jsx": "^4.0.2",
    "eslint-config-standard-jsx": "^5.0.0",
    "eslint-plugin-import": "^2.8.0",
    "eslint-plugin-node": "^6.0.0",
    "eslint-plugin-promise": "^3.6.0",
    "eslint-plugin-react": "^7.6.1",
    "eslint-plugin-standard": "^3.0.1",
    "exec-promise": "^0.7.0",
    "flow-bin": "^0.66.0",
    "flow-bin": "^0.69.0",
    "globby": "^8.0.0",
    "husky": "^0.14.3",
    "jest": "^22.0.4",
@@ -34,15 +34,18 @@
    "testEnvironment": "node",
    "testPathIgnorePatterns": [
      "/dist/",
      "/xo-vmdk-to-vhd/",
      "/xo-web/"
    ],
    "testRegex": "\\.spec\\.js$",
    "transform": {
      "/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
      "/@xen-orchestra/fs/.+\\.jsx?$": "babel-7-jest",
      "/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
      "/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
      "/packages/vhd-lib/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
      "/packages/xo-vmdk-to-vhd/.+\\.jsx?$": "babel-7-jest",
      "\\.jsx?$": "babel-jest"
    }
  },
@@ -51,12 +54,13 @@
    "build": "scripts/run-script --parallel build",
    "clean": "scripts/run-script --parallel clean",
    "dev": "scripts/run-script --parallel dev",
    "dev-test": "jest --bail --watch",
    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
    "posttest": "scripts/run-script test",
    "precommit": "scripts/lint-staged",
    "prepare": "scripts/run-script prepare",
    "pretest": "eslint --ignore-path .gitignore .",
    "test": "jest && flow status"
    "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
    "test-integration": "jest \".integ\\.spec\\.js$\""
  },
  "workspaces": [
    "@xen-orchestra/*",
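The reworked `test` scripts separate unit from integration runs with a single path pattern instead of separate directories. A quick sketch of how that negative-lookahead regex behaves (file names are illustrative):

```js
const unitOnly = /^(?!.*\.integ\.spec\.js$)/

unitOnly.test('packages/vhd-lib/src/vhd.spec.js') // true: picked up by `yarn test`
unitOnly.test('packages/vhd-cli/src/info.integ.spec.js') // false: left to `test-integration`
```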
@@ -1,47 +1,3 @@
'use strict'

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const pkg = require('./package')

const plugins = {
  lodash: {},
}

const presets = {
  '@babel/preset-env': {
    debug: !__TEST__,
    loose: true,
    shippedProposals: true,
    targets: __PROD__
      ? (() => {
          let node = (pkg.engines || {}).node
          if (node !== undefined) {
            const trimChars = '^=>~'
            while (trimChars.includes(node[0])) {
              node = node.slice(1)
            }
            return { node: node }
          }
        })()
      : { browsers: '', node: 'current' },
    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
  },
}

Object.keys(pkg.devDependencies || {}).forEach(name => {
  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
    plugins[name] = {}
  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
    presets[name] = {}
  }
})

module.exports = {
  comments: !__PROD__,
  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -1,6 +1,6 @@
{
  "name": "complex-matcher",
  "version": "0.2.1",
  "version": "0.3.0",
  "license": "ISC",
  "description": "",
  "keywords": [],
@@ -30,9 +30,9 @@
    "lodash": "^4.17.4"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.40",
    "@babel/core": "7.0.0-beta.40",
    "@babel/preset-env": "7.0.0-beta.40",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.1",
    "rimraf": "^2.6.2"
@@ -70,6 +70,29 @@ export class And extends Node {
  }
}

export class Comparison extends Node {
  constructor (operator, value) {
    super()
    this._comparator = Comparison.comparators[operator]
    this._operator = operator
    this._value = value
  }

  match (value) {
    return typeof value === 'number' && this._comparator(value, this._value)
  }

  toString () {
    return this._operator + String(this._value)
  }
}
Comparison.comparators = {
  '>': (a, b) => a > b,
  '>=': (a, b) => a >= b,
  '<': (a, b) => a < b,
  '<=': (a, b) => a <= b,
}

export class Or extends Node {
  constructor (children) {
    super()
@@ -408,6 +431,13 @@ const parser = P.grammar({
      P.text(')')
    ).map(_ => new Or(_[4])),
    P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
    P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
      val = +val
      if (Number.isNaN(val)) {
        throw new TypeError('value must be a number')
      }
      return new Comparison(op, val)
    }),
    P.seq(r.string, r.ws, P.text(':'), r.ws, r.term).map(
      _ => new Property(_[0], _[4])
    ),
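This hunk teaches the filter grammar numeric comparisons. A minimal sketch of the resulting behavior, assuming the package's usual `parse` entry point (the property name and objects are illustrative):

```js
import * as CM from 'complex-matcher'

const predicate = CM.parse('snapshots:>2')

predicate.match({ snapshots: 3 }) // true
predicate.match({ snapshots: 1 }) // false
predicate.match({ snapshots: 'n/a' }) // false: Comparison only matches numbers
```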
@@ -1,47 +1,3 @@
'use strict'

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const pkg = require('./package')

const plugins = {
  lodash: {},
}

const presets = {
  '@babel/preset-env': {
    debug: !__TEST__,
    loose: true,
    shippedProposals: true,
    targets: __PROD__
      ? (() => {
          let node = (pkg.engines || {}).node
          if (node !== undefined) {
            const trimChars = '^=>~'
            while (trimChars.includes(node[0])) {
              node = node.slice(1)
            }
            return { node: node }
          }
        })()
      : { browsers: '', node: 'current' },
    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
  },
}

Object.keys(pkg.devDependencies || {}).forEach(name => {
  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
    plugins[name] = {}
  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
    presets[name] = {}
  }
})

module.exports = {
  comments: !__PROD__,
  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -1,6 +1,6 @@
{
  "name": "value-matcher",
  "version": "0.1.0",
  "version": "0.2.0",
  "license": "ISC",
  "description": "",
  "keywords": [],
@@ -28,10 +28,10 @@
  },
  "dependencies": {},
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.40",
    "@babel/core": "7.0.0-beta.40",
    "@babel/preset-env": "7.0.0-beta.40",
    "@babel/preset-flow": "7.0.0-beta.40",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"
  },
@@ -1,6 +1,6 @@
{
  "name": "vhd-cli",
  "version": "0.0.0",
  "version": "0.0.1",
  "license": "ISC",
  "description": "",
  "keywords": [],
@@ -26,10 +26,11 @@
    "node": ">=4"
  },
  "dependencies": {
    "@nraynaud/struct-fu": "^1.0.1",
    "@nraynaud/xo-fs": "^0.0.5",
    "@xen-orchestra/fs": "^0.0.0",
    "babel-runtime": "^6.22.0",
    "exec-promise": "^0.7.0"
    "exec-promise": "^0.7.0",
    "struct-fu": "^1.2.0",
    "vhd-lib": "^0.0.0"
  },
  "devDependencies": {
    "babel-cli": "^6.24.1",
@@ -38,14 +39,18 @@
    "babel-preset-env": "^1.5.2",
    "babel-preset-stage-3": "^6.24.1",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
    "execa": "^0.10.0",
    "index-modules": "^0.3.0",
    "promise-toolbox": "^0.9.5",
    "rimraf": "^2.6.1",
    "tmp": "^0.0.33"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepare": "yarn run build"
  },
  "babel": {
    "plugins": [

15
packages/vhd-cli/src/commands/check.js
Normal file
@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const handler = getHandler({ url: 'file:///' })
  for (const vhd of args) {
    try {
      await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
      console.log('ok:', vhd)
    } catch (error) {
      console.error('nok:', vhd, error)
    }
  }
}

12
packages/vhd-cli/src/commands/info.js
Normal file
@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'

export default async args => {
  const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))

  await vhd.readHeaderAndFooter()

  console.log(vhd.header)
  console.log(vhd.footer)
}

21
packages/vhd-cli/src/commands/synthetize.js
Normal file
@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'

export default async function main (args) {
  if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
    return `Usage: ${this.command} <input VHD> <output VHD>`
  }

  const handler = getHandler({ url: 'file:///' })
  return new Promise((resolve, reject) => {
    createSyntheticStream(handler, path.resolve(args[0]))
      .on('error', reject)
      .pipe(
        createWriteStream(args[1])
          .on('error', reject)
          .on('finish', resolve)
      )
  })
}
@@ -1,19 +1,44 @@
#!/usr/bin/env node

import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'

import Vhd from './vhd'
import commands from './commands'

execPromise(async args => {
  const vhd = new Vhd(
    new RemoteHandlerLocal({ url: 'file:///' }),
    resolve(args[0])
function runCommand (commands, [command, ...args]) {
  if (command === undefined || command === '-h' || command === '--help') {
    command = 'help'
  }

  const fn = commands[command]

  if (fn === undefined) {
    if (command === 'help') {
      return `Usage:

${Object.keys(commands)
  .filter(command => command !== 'help')
  .map(command => ` ${this.command} ${command}`)
  .join('\n\n')}`
    }

    throw `invalid command ${command}` // eslint-disable-line no-throw-literal
  }

  return fn.call(
    {
      __proto__: this,
      command: `${this.command} ${command}`,
    },
    args
  )
}

  await vhd.readHeaderAndFooter()

  console.log(vhd._header)
  console.log(vhd._footer)
})
execPromise(
  runCommand.bind(
    {
      command: 'vhd-cli',
      runCommand,
    },
    commands
  )
)
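The entry point is now a generic dispatcher over the `commands` map generated by `index-modules`. A rough sketch of the call flow for `vhd-cli info disk.vhd` (the map below is illustrative):

```js
const commands = {
  info: async args => console.log('would inspect', args[0]),
}

// exec-promise hands runCommand the argv tail:
runCommand.call({ command: 'vhd-cli', runCommand }, commands, ['info', 'disk.vhd'])
// → invokes commands.info(['disk.vhd']) with this.command === 'vhd-cli info'
```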

28
packages/vhd-cli/src/info.integ.spec.js
Normal file
@@ -0,0 +1,28 @@
/* eslint-env jest */

import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'

import command from './commands/info'

const initialDir = process.cwd()

jest.setTimeout(10000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('can run the command', async () => {
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
  await command(['empty.vhd'])
})
@@ -1,461 +0,0 @@
import assert from 'assert'
import fu from '@nraynaud/struct-fu'
import { dirname } from 'path'

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

/* eslint-disable no-unused-vars */

const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512

/* eslint-enable no-unused-vars */

// ===================================================================

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  fu.struct('dataOffset', [
    fu.uint32('high'), // 16
    fu.uint32('low'), // 20
  ]),
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  fu.struct('originalSize', [
    // At the creation, current size of the hard disk.
    fu.uint32('high'), // 40
    fu.uint32('low'), // 44
  ]),
  fu.struct('currentSize', [
    // Current size of the virtual disk. At the creation: currentSize = originalSize.
    fu.uint32('high'), // 48
    fu.uint32('low'), // 52
  ]),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
    fu.uint8('sectorsPerTrackCylinder'), // 59
  ]),
  fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
  fu.uint32('checksum'), // 64
  fu.uint8('uuid', 16), // 68
  fu.char('saved'), // 84
  fu.char('hidden'), // 85
  fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
  fu.struct('tableOffset', [
    // Absolute byte offset of the Block Allocation Table.
    fu.uint32('high'),
    fu.uint32('low'),
  ]),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
  fu.uint32('checksum'),
  fu.uint8('parentUuid', 16),
  fu.uint32('parentTimestamp'),
  fu.byte('reserved1', 4),
  fu.char16be('parentUnicodeName', 512),
  fu.struct(
    'parentLocatorEntry',
    [
      fu.uint32('platformCode'),
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      fu.struct('platformDataOffset', [
        // Absolute byte offset of the locator data.
        fu.uint32('high'),
        fu.uint32('low'),
      ]),
    ],
    8
  ),
  fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size

// ===================================================================
// Helpers
// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)

// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
  map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
  map[bit >> 3] &= ~(1 << (bit & 7))
}

const addOffsets = (...offsets) =>
  offsets.reduce(
    (a, b) =>
      b == null
        ? a
        : typeof b === 'object'
          ? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
          : { bytes: a.bytes + b, bits: a.bits },
    { bytes: 0, bits: 0 }
  )

const pack = (field, value, buf, offset) => {
  field.pack(value, buf, addOffsets(field.offset, offset))
}

const unpack = (field, buf, offset) =>
  field.unpack(buf, addOffsets(field.offset, offset))

// ===================================================================

const streamToNewBuffer = stream =>
  new Promise((resolve, reject) => {
    const chunks = []
    let length = 0

    const onData = chunk => {
      chunks.push(chunk)
      length += chunk.length
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(Buffer.concat(chunks, length))
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

const streamToExistingBuffer = (
  stream,
  buffer,
  offset = 0,
  end = buffer.length
) =>
  new Promise((resolve, reject) => {
    assert(offset >= 0)
    assert(end > offset)
    assert(end <= buffer.length)

    let i = offset

    const onData = chunk => {
      const prev = i
      i += chunk.length

      if (i > end) {
        return onError(new Error('too much data'))
      }

      chunk.copy(buffer, prev)
    }
    stream.on('data', onData)

    const clean = () => {
      stream.removeListener('data', onData)
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
    }
    const onEnd = () => {
      resolve(i - offset)
      clean()
    }
    stream.on('end', onEnd)
    const onError = error => {
      reject(error)
      clean()
    }
    stream.on('error', onError)
  })

// ===================================================================

// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
  let sum = 0

  // Do not use the stored checksum to compute the new checksum.
  const checksumField = struct.fields.checksum
  const checksumOffset = offset + checksumField.offset
  for (let i = offset, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = offset + struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  return ~sum >>> 0
}

const verifyChecksum = (struct, buf, offset) =>
  unpack(struct.fields.checksum, buf, offset) ===
  computeChecksum(struct, buf, offset)

const getParentLocatorSize = parentLocatorEntry => {
  const { platformDataSpace } = parentLocatorEntry

  if (platformDataSpace < SECTOR_SIZE) {
    return platformDataSpace * SECTOR_SIZE
  }

  return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}

// ===================================================================

// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]

export default class Vhd {
  constructor (handler, path) {
    this._handler = handler
    this._path = path

    this._blockAllocationTable = null
    this._blockBitmapSize = null
    this._footer = null
    this._header = null
    this._parent = null
    this._sectorsPerBlock = null
  }

  // Read `length` bytes starting from `begin`.
  //
  // - if `buffer`: it is filled starting from `offset`, and the
  //   number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _read (begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    return this._handler
      .createReadStream(this._path, {
        end: begin + length - 1,
        start: begin,
      })
      .then(
        buf
          ? stream =>
              streamToExistingBuffer(
                stream,
                buf,
                offset,
                (offset || 0) + length
              )
          : streamToNewBuffer
      )
  }

  // - if `buffer`: it is filled with 0 starting from `offset`, and
  //   the number of written bytes is returned;
  // - otherwise: a new buffer is allocated and returned.
  _zeroes (length, buf, offset = 0) {
    if (buf) {
      assert(offset >= 0)
      assert(length > 0)

      const end = offset + length
      assert(end <= buf.length)

      buf.fill(0, offset, end)
      return Promise.resolve(length)
    }

    return Promise.resolve(Buffer.alloc(length))
  }

  // Return the position of a block in the VHD or undefined if not found.
  _getBlockAddress (block) {
    assert(block >= 0)
    assert(block < this._header.maxTableEntries)

    const blockAddr = this._blockAllocationTable[block]
    if (blockAddr !== 0xffffffff) {
      return blockAddr * SECTOR_SIZE
    }
  }

  // -----------------------------------------------------------------

  async readHeaderAndFooter () {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)

    if (!verifyChecksum(fuFooter, buf)) {
      throw new Error('footer checksum does not match')
    }

    if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
      throw new Error('header checksum does not match')
    }

    return this._initMetadata(
      unpack(fuHeader, buf, FOOTER_SIZE),
      unpack(fuFooter, buf)
    )
  }

  async _initMetadata (header, footer) {
    const sectorsPerBlock = header.blockSize / SECTOR_SIZE
    assert(sectorsPerBlock % 1 === 0)

    // 1 bit per sector, rounded up to full sectors
    this._blockBitmapSize =
      Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
    assert(this._blockBitmapSize === SECTOR_SIZE)

    this._footer = footer
    this._header = header
    this.size = uint32ToUint64(this._footer.currentSize)

    if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
      const parent = new Vhd(
        this._handler,
        `${dirname(this._path)}/${header.parentUnicodeName}`
      )
      await parent.readHeaderAndFooter()
      await parent.readBlockAllocationTable()

      this._parent = parent
    }
  }

  // -----------------------------------------------------------------

  async readBlockAllocationTable () {
    const { maxTableEntries, tableOffset } = this._header
    const fuTable = fu.uint32(maxTableEntries)

    this._blockAllocationTable = unpack(
      fuTable,
      await this._read(uint32ToUint64(tableOffset), fuTable.size)
    )
  }

  // -----------------------------------------------------------------

  // read a single sector in a block
  async _readBlockSector (block, sector, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)
    assert(begin + length <= SECTOR_SIZE)

    const blockAddr = this._getBlockAddress(block)
    const blockBitmapSize = this._blockBitmapSize
    const parent = this._parent

    if (
      blockAddr &&
      (!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
    ) {
      return this._read(
        blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
        length,
        buf,
        offset
      )
    }

    return parent
      ? parent._readBlockSector(block, sector, begin, length, buf, offset)
      : this._zeroes(length, buf, offset)
  }

  _readBlock (block, begin, length, buf, offset) {
    assert(begin >= 0)
    assert(length > 0)

    const { blockSize } = this._header
    assert(begin + length <= blockSize)

    const blockAddr = this._getBlockAddress(block)
    const parent = this._parent

    if (!blockAddr) {
      return parent
        ? parent._readBlock(block, begin, length, buf, offset)
        : this._zeroes(length, buf, offset)
    }

    if (!parent) {
      return this._read(
        blockAddr + this._blockBitmapSize + begin,
        length,
        buf,
        offset
      )
    }

    // FIXME: we should read as many sectors in a single pass as
    // possible for maximum perf.
    const [sector, beginInSector] = div(begin, SECTOR_SIZE)
    return this._readBlockSector(
      block,
      sector,
      beginInSector,
      Math.min(length, SECTOR_SIZE - beginInSector),
      buf,
      offset
    )
  }

  read (buf, begin, length = buf.length, offset) {
    assert(Buffer.isBuffer(buf))
    assert(begin >= 0)

    const { size } = this
    if (begin >= size) {
      return Promise.resolve(0)
    }

    const { blockSize } = this._header
    const [block, beginInBlock] = div(begin, blockSize)

    return this._readBlock(
      block,
      beginInBlock,
      Math.min(length, blockSize - beginInBlock, size - begin),
      buf,
      offset
    )
  }
}

3
packages/vhd-lib/.babelrc.js
Normal file
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

56
packages/vhd-lib/package.json
Normal file
@@ -0,0 +1,56 @@
{
  "name": "vhd-lib",
  "version": "0.0.0",
  "license": "AGPL-3.0",
  "description": "Primitives for VHD file handling",
  "keywords": [],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "preferGlobal": true,
  "main": "dist/",
  "bin": {},
  "files": [
    "dist/"
  ],
  "engines": {
    "node": ">=6"
  },
  "dependencies": {
    "@babel/runtime": "^7.0.0-beta.44",
    "@xen-orchestra/fs": "^0.0.0",
    "async-iterator-to-stream": "^1.0.2",
    "execa": "^0.10.0",
    "from2": "^2.3.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "limit-concurrency-decorator": "^0.4.0",
    "promise-toolbox": "^0.9.5",
    "struct-fu": "^1.2.0",
    "uuid": "^3.0.1",
    "tmp": "^0.0.33"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-transform-runtime": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "fs-promise": "^2.0.0",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
  },
  "scripts": {
    "build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
    "clean": "rimraf dist/",
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepare": "yarn run build"
  }
}

7
packages/vhd-lib/src/_bitmap.js
Normal file
@@ -0,0 +1,7 @@
const MASK = 0x80

export const set = (map, bit) => {
  map[bit >> 3] |= MASK >> (bit & 7)
}

export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
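Note the layout: unlike the old `testBit`/`setBit` helpers in `vhd.js`, which were LSB-first, these treat bit 0 as the most significant bit of its byte, matching the on-disk sector bitmaps. A quick illustrative check:

```js
const bitmap = Buffer.alloc(1)

set(bitmap, 0) // flips the MSB of byte 0
bitmap[0].toString(2) // '10000000'
test(bitmap, 0) // true
test(bitmap, 1) // false
```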

37
packages/vhd-lib/src/_computeGeometryForSize.js
Normal file
@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'

export default function computeGeometryForSize (size) {
  const totalSectors = Math.ceil(size / 512)
  let sectorsPerTrackCylinder
  let heads
  let cylinderTimesHeads
  if (totalSectors > 65535 * 16 * 255) {
    throw Error('disk is too big')
  }
  // straight copypasta from the file spec appendix on CHS Calculation
  if (totalSectors >= 65535 * 16 * 63) {
    sectorsPerTrackCylinder = 255
    heads = 16
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
  } else {
    sectorsPerTrackCylinder = 17
    cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
    if (heads < 4) {
      heads = 4
    }
    if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
      sectorsPerTrackCylinder = 31
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
    if (cylinderTimesHeads >= heads * 1024) {
      sectorsPerTrackCylinder = 63
      heads = 16
      cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
    }
  }
  const cylinders = Math.ceil(cylinderTimesHeads / heads)
  const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
  return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
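A worked example of the small-disk branch, traced by hand through the code above; note that `actualSize` rounds the requested size up to a whole CHS grid:

```js
computeGeometryForSize(2 * 1024 * 1024)
// totalSectors = 4096
// → sectorsPerTrackCylinder = 17, heads = 4, cylinders = Math.ceil(4096 / 17 / 4) = 61
// → { cylinders: 61, heads: 4, sectorsPerTrackCylinder: 17, actualSize: 2123776 }
```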

30
packages/vhd-lib/src/_constants.js
Normal file
@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff

// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '

// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec

export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'

export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4

export const PARENT_LOCATOR_ENTRIES = 8

export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
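The platform codes are not arbitrary numbers: each constant is the big-endian ASCII tag defined by the VHD spec. One way to see it (illustrative):

```js
Buffer.from('57693272', 'hex').toString('ascii') // 'Wi2r' (PLATFORM_WI2R)
Buffer.from('4d616358', 'hex').toString('ascii') // 'MacX' (PLATFORM_MACX)
```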

56
packages/vhd-lib/src/_createFooterHeader.js
Normal file
@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'

import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
  CREATOR_APPLICATION,
  DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
  DISK_TYPE_FIXED,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PLATFORM_WI2K,
} from './_constants'

export function createFooter (
  size,
  timestamp,
  geometry,
  dataOffset,
  diskType = DISK_TYPE_FIXED
) {
  const footer = fuFooter.pack({
    cookie: FOOTER_COOKIE,
    features: 2,
    fileFormatVersion: FILE_FORMAT_VERSION,
    dataOffset,
    timestamp,
    creatorApplication: CREATOR_APPLICATION,
    creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
    originalSize: size,
    currentSize: size,
    diskGeometry: geometry,
    diskType,
    uuid: generateUuid(null, []),
  })
  checksumStruct(footer, fuFooter)
  return footer
}

export function createHeader (
  maxTableEntries,
  tableOffset = HEADER_SIZE + FOOTER_SIZE,
  blockSize = VHD_BLOCK_SIZE_BYTES
) {
  const header = fuHeader.pack({
    cookie: HEADER_COOKIE,
    tableOffset,
    headerVersion: HEADER_VERSION,
    maxTableEntries,
    blockSize,
  })
  checksumStruct(header, fuHeader)
  return header
}
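A sketch of assembling the metadata for a small dynamic VHD with these helpers; the timestamp convention (seconds since 2000-01-01 UTC) comes from the VHD spec, the rest of the values are illustrative:

```js
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import { DEFAULT_BLOCK_SIZE, DISK_TYPE_DYNAMIC, FOOTER_SIZE } from './_constants'

const geometry = computeGeometryForSize(2 * 1024 * 1024)
const timestamp = Math.floor((Date.now() - Date.UTC(2000, 0, 1)) / 1000)

// a dynamic disk's dataOffset points at the header, right after the footer
const footer = createFooter(
  geometry.actualSize,
  timestamp,
  geometry,
  FOOTER_SIZE,
  DISK_TYPE_DYNAMIC
)
const header = createHeader(Math.ceil(geometry.actualSize / DEFAULT_BLOCK_SIZE))
// both are ready-to-write buffers with valid checksums
```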

121
packages/vhd-lib/src/_structs.js
Normal file
@@ -0,0 +1,121 @@
|
||||
import assert from 'assert'
|
||||
import fu from 'struct-fu'
|
||||
|
||||
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
|
||||
|
||||
const SIZE_OF_32_BITS = Math.pow(2, 32)
|
||||
|
||||
const uint64 = fu.derive(
|
||||
fu.uint32(2),
|
||||
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
|
||||
_ => _[0] * SIZE_OF_32_BITS + _[1]
|
||||
)
|
||||
const uint64Undefinable = fu.derive(
|
||||
fu.uint32(2),
|
||||
number =>
|
||||
number === undefined
|
||||
? [0xffffffff, 0xffffffff]
|
||||
: [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
|
||||
_ =>
|
||||
_[0] === 0xffffffff && _[1] === 0xffffffff
|
||||
? undefined
|
||||
: _[0] * SIZE_OF_32_BITS + _[1]
|
||||
)
|
||||
|
||||
export const fuFooter = fu.struct([
|
||||
fu.char('cookie', 8), // 0
|
||||
fu.uint32('features'), // 8
|
||||
fu.uint32('fileFormatVersion'), // 12
|
||||
uint64Undefinable('dataOffset'), // offset of the header
|
||||
fu.uint32('timestamp'), // 24
|
||||
fu.char('creatorApplication', 4), // 28
|
||||
fu.uint32('creatorVersion'), // 32
|
||||
fu.uint32('creatorHostOs'), // 36
|
||||
uint64('originalSize'),
|
||||
uint64('currentSize'),
|
||||
fu.struct('diskGeometry', [
|
||||
fu.uint16('cylinders'), // 56
|
||||
fu.uint8('heads'), // 58
|
||||
fu.uint8('sectorsPerTrackCylinder'), // 59
|
||||
]),
|
||||
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
|
||||
fu.uint32('checksum'), // 64
|
||||
fu.uint8('uuid', 16), // 68
|
||||
fu.char('saved'), // 84
|
||||
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
|
||||
fu.char('reserved', 426), // 86
|
||||
])
|
||||
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
|
||||
|
||||
export const fuHeader = fu.struct([
|
||||
fu.char('cookie', 8),
|
||||
uint64Undefinable('dataOffset'),
|
||||
uint64('tableOffset'),
|
||||
fu.uint32('headerVersion'),
|
||||
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
|
||||
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
|
||||
fu.uint32('checksum'),
|
||||
fu.uint8('parentUuid', 16),
|
||||
fu.uint32('parentTimestamp'),
|
||||
fu.uint32('reserved1'),
|
||||
fu.char16be('parentUnicodeName', 512),
|
||||
fu.struct(
|
||||
'parentLocatorEntry',
|
||||
[
|
||||
fu.uint32('platformCode'),
|
||||
fu.uint32('platformDataSpace'),
|
||||
fu.uint32('platformDataLength'),
|
||||
fu.uint32('reserved'),
|
||||
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
|
||||
],
|
||||
PARENT_LOCATOR_ENTRIES
|
||||
),
|
||||
fu.char('reserved2', 256),
|
||||
])
|
||||
assert.strictEqual(fuHeader.size, HEADER_SIZE)
|
||||
|
||||
export const packField = (field, value, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
field.pack(
|
||||
value,
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
|
||||
export const unpackField = (field, buf) => {
|
||||
const { offset } = field
|
||||
|
||||
return field.unpack(
|
||||
buf,
|
||||
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
|
||||
)
|
||||
}
|
||||
|
||||
// Returns the checksum of a raw struct.
|
||||
// The raw struct (footer or header) is altered with the new sum.
|
||||
export function checksumStruct (buf, struct) {
|
||||
const checksumField = struct.fields.checksum
|
||||
let sum = 0
|
||||
|
||||
// Do not use the stored checksum to compute the new checksum.
|
||||
const checksumOffset = checksumField.offset
|
||||
for (let i = 0, n = checksumOffset; i < n; ++i) {
|
||||
sum += buf[i]
|
||||
}
|
||||
for (
|
||||
let i = checksumOffset + checksumField.size, n = struct.size;
|
||||
i < n;
|
||||
++i
|
||||
) {
|
||||
sum += buf[i]
|
||||
}
|
||||
|
||||
sum = ~sum >>> 0
|
||||
|
||||
// Write new sum.
|
||||
packField(checksumField, sum, buf)
|
||||
|
||||
return sum
|
||||
}
|
||||
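The VHD checksum is the one's complement of the byte sum over the whole struct, with the checksum field itself skipped. A self-contained sketch of the same computation, usable on a raw 512-byte footer without struct-fu (checksum offset 64 and size 4 are taken from the footer layout above):

// Sketch, assuming the caller knows where the checksum field lives.
const rawChecksum = (buf, checksumOffset, checksumSize) => {
  let sum = 0
  for (let i = 0; i < buf.length; ++i) {
    if (i < checksumOffset || i >= checksumOffset + checksumSize) {
      sum += buf[i]
    }
  }
  return ~sum >>> 0 // one's complement, coerced to unsigned 32 bits
}
// For a VHD footer buffer: rawChecksum(bufFooter, 64, 4)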
37
packages/vhd-lib/src/chain.js
Normal file
@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'

export default async function chain (
  parentHandler,
  parentPath,
  childHandler,
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockAllocationTable(),
    parentVhd.readHeaderAndFooter(),
  ])

  const parentName = relative(dirname(childPath), parentPath)
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName
  await childVhd.setUniqueParentLocator(parentName)
  await childVhd.writeHeader()
  await childVhd.writeFooter()
}
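Usage, with the same call shape as the coalesce tests further down (the handler is assumed to come from @xen-orchestra/fs and paths are relative to its root):

// Make child.vhd a differencing disk whose parent is parent.vhd,
// forcing the disk type conversion if the child is not one yet.
await chain(handler, 'parent.vhd', handler, 'child.vhd', true)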
42
packages/vhd-lib/src/createReadableRawStream.js
Normal file
@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'

export default asyncIteratorToStream(async function * (size, blockParser) {
  const geometry = computeGeometryForSize(size)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry
  )
  let position = 0

  function * filePadding (paddingLength) {
    if (paddingLength > 0) {
      const chunkSize = 1024 * 1024 // 1 MB
      for (
        let paddingPosition = 0;
        paddingPosition + chunkSize < paddingLength;
        paddingPosition += chunkSize
      ) {
        yield Buffer.alloc(chunkSize)
      }
      yield Buffer.alloc(paddingLength % chunkSize)
    }
  }

  let next
  while ((next = await blockParser.next()) !== null) {
    const paddingLength = next.offsetBytes - position
    if (paddingLength < 0) {
      throw new Error('Received out of order blocks')
    }
    yield * filePadding(paddingLength)
    yield next.data
    position = next.offsetBytes + next.data.length
  }
  yield * filePadding(actualSize - position)
  yield footer
})
143
packages/vhd-lib/src/createReadableSparseStream.js
Normal file
@@ -0,0 +1,143 @@
import asyncIteratorToStream from 'async-iterator-to-stream'

import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
  BLOCK_UNUSED,
  DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
  HEADER_SIZE,
  SECTOR_SIZE,
} from './_constants'

import { set as setBitmap } from './_bitmap'

const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE

/**
 * @returns {Array} an array of occupation bitmaps, one entry per VHD block, each bit mapping one input block of `incomingBlockSize` bytes
 */
function createBAT (
  firstBlockPosition,
  blockAddressList,
  ratio,
  bat,
  bitmapSize
) {
  const vhdOccupationTable = []
  let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
  blockAddressList.forEach(blockPosition => {
    const scaled = blockPosition / VHD_BLOCK_SIZE_BYTES
    const vhdTableIndex = Math.floor(scaled)
    if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
      bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
      currentVhdPositionSector +=
        (bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
    }
    // not using bit operators to avoid the int32 coercion, that way we can go to 53 bits
    vhdOccupationTable[vhdTableIndex] =
      (vhdOccupationTable[vhdTableIndex] || 0) +
      Math.pow(2, (scaled % 1) * ratio)
  })
  return vhdOccupationTable
}

function createBitmap (bitmapSize, ratio, vhdOccupationBucket) {
  const bitmap = Buffer.alloc(bitmapSize)
  for (let i = 0; i < VHD_BLOCK_SIZE_SECTORS / ratio; i++) {
    // do not shift to avoid int32 coercion
    if ((vhdOccupationBucket * Math.pow(2, -i)) & 1) {
      for (let j = 0; j < ratio; j++) {
        setBitmap(bitmap, i * ratio + j)
      }
    }
  }
  return bitmap
}

function * yieldIfNotEmpty (buffer) {
  if (buffer.length > 0) {
    yield buffer
  }
}

async function * generateFileContent (
  blockIterator,
  bitmapSize,
  ratio,
  vhdOccupationTable
) {
  let currentVhdBlockIndex = -1
  let currentBlockBuffer = Buffer.alloc(0)
  for await (const next of blockIterator) {
    const batEntry = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
    if (batEntry !== currentVhdBlockIndex) {
      yield * yieldIfNotEmpty(currentBlockBuffer)
      currentBlockBuffer = Buffer.alloc(VHD_BLOCK_SIZE_BYTES)
      currentVhdBlockIndex = batEntry
      yield createBitmap(bitmapSize, ratio, vhdOccupationTable[batEntry])
    }
    next.data.copy(currentBlockBuffer, next.offsetBytes % VHD_BLOCK_SIZE_BYTES)
  }
  yield * yieldIfNotEmpty(currentBlockBuffer)
}

export default asyncIteratorToStream(async function * (
  diskSize,
  incomingBlockSize,
  blockAddressList,
  blockIterator
) {
  const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
  if (ratio % 1 !== 0) {
    throw new Error(
      `Can't import file, grain size (${incomingBlockSize}) is not a divider of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
    )
  }
  if (ratio > 53) {
    throw new Error(
      `Can't import file, grain size / block size ratio is > 53 (${ratio})`
    )
  }

  const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
  const tablePhysicalSizeBytes = Math.ceil(maxTableEntries * 4 / 512) * 512

  const batPosition = FOOTER_SIZE + HEADER_SIZE
  const firstBlockPosition = batPosition + tablePhysicalSizeBytes
  const geometry = computeGeometryForSize(diskSize)
  const actualSize = geometry.actualSize
  const footer = createFooter(
    actualSize,
    Math.floor(Date.now() / 1000),
    geometry,
    FOOTER_SIZE,
    DISK_TYPE_DYNAMIC
  )
  const header = createHeader(
    maxTableEntries,
    batPosition,
    VHD_BLOCK_SIZE_BYTES
  )
  const bitmapSize =
    Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
  const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
  const vhdOccupationTable = createBAT(
    firstBlockPosition,
    blockAddressList,
    ratio,
    bat,
    bitmapSize
  )
  yield footer
  yield header
  yield bat
  yield * generateFileContent(
    blockIterator,
    bitmapSize,
    ratio,
    vhdOccupationTable
  )
  yield footer
})
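The occupancy bookkeeping in createBAT() and createBitmap() deliberately avoids JavaScript's bitwise operators: shifts and masks coerce their operands to signed 32-bit integers, while adding powers of two keeps exact integers up to 2 ** 53, which is why the grain-size ratio is capped at 53. A small sketch of the difference:

// Bitwise operators truncate to int32; plain arithmetic does not.
const bit = 40
console.log(1 << bit) // 256: the shift count wraps modulo 32
console.log(Math.pow(2, bit)) // 1099511627776: exact up to 2 ** 53
// Testing bit i without shifts, as createBitmap() does:
const bucket = Math.pow(2, 40) + 1
console.log((bucket * Math.pow(2, -40)) & 1) // 1: bit 40 is set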
153
packages/vhd-lib/src/createSyntheticStream.js
Normal file
@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'

import Vhd from './vhd'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DYNAMIC,
  FOOTER_SIZE,
  HEADER_SIZE,
  SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'

const resolveRelativeFromFile = (file, path) =>
  resolve('/', dirname(file), path).slice(1)

export default asyncIteratorToStream(function * (handler, path) {
  const fds = []

  try {
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockAllocationTable()

      if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
        break
      }

      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length

    // this is the VHD we want to synthesize
    const vhd = vhds[0]

    // this is the root VHD
    const rootVhd = vhds[nVhds - 1]

    // data of our synthetic VHD
    // TODO: set parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: FOOTER_SIZE + HEADER_SIZE,
      parentTimestamp: rootVhd.header.parentTimestamp,
      parentUnicodeName: rootVhd.header.parentUnicodeName,
      parentUuid: rootVhd.header.parentUuid,
    }

    const bat = Buffer.allocUnsafe(vhd.batSize)
    let footer = {
      ...vhd.footer,
      dataOffset: FOOTER_SIZE,
      diskType: rootVhd.footer.diskType,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE

    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil(
          (header.tableOffset + bat.length) / SECTOR_SIZE
        );
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer

    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header

    yield bat

    // TODO: for generic usage the bitmap needs to be properly computed for each block
    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }

      yield bitmap

      const blocksByVhd = new Map()
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd === rootVhd
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
          return
        }
        while (i < n) {
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
    }

    yield footer
  } finally {
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})
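Usage, as in the 'createSyntheticStream passes vhd-util check' test below: the function walks the parent chain from the given leaf up to its dynamic root and streams a single flattened dynamic VHD.

// Flatten child.vhd and all of its ancestors into one VHD on disk.
const stream = createSyntheticStream(handler, 'child.vhd')
stream.pipe(fs.createWriteStream('flattened.vhd'))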
8
packages/vhd-lib/src/index.js
Normal file
@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
  default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'
283
packages/vhd-lib/src/merge.integ.spec.js
Normal file
@@ -0,0 +1,283 @@
/* eslint-env jest */

import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'

import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'

const initialDir = process.cwd()

jest.setTimeout(60000)

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

async function createRandomFile (name, sizeMb) {
  await execa('bash', [
    '-c',
    `< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
  ])
}

async function checkFile (vhdName) {
  await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
}

async function recoverRawContent (vhdName, rawName, originalSize) {
  await checkFile(vhdName)
  await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdName, rawName])
  if (originalSize !== undefined) {
    await execa('truncate', ['-s', originalSize, rawName])
  }
}

async function convertFromRawToVhd (rawName, vhdName) {
  await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
}

test('blocks can be moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd._freeFirstBlockSpace(8000000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('the BAT MSB is not used for sign', async () => {
  const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const vhd = new Vhd(handler, 'empty.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  // we want the bit 31 to be on, to prove it's not been used for sign
  const hugeWritePositionSectors = Math.pow(2, 31) + 200
  await vhd.writeData(hugeWritePositionSectors, randomBuffer)
  await checkFile('empty.vhd')
  // here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
  const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
  await vhd._freeFirstBlockSpace(hugePositionBytes)

  // we recover the data manually for speed reasons.
  // fs.write() with offset is way faster than qemu-img when there is a 1.5 TB
  // hole before the block of data
  const recoveredFile = await fs.open('recovered', 'w')
  try {
    const vhd2 = new Vhd(handler, 'empty.vhd')
    await vhd2.readHeaderAndFooter()
    await vhd2.readBlockAllocationTable()
    for (let i = 0; i < vhd.header.maxTableEntries; i++) {
      const entry = vhd._getBatEntry(i)
      if (entry !== 0xffffffff) {
        const block = (await vhd2._readBlock(i)).data
        await fs.write(
          recoveredFile,
          block,
          0,
          block.length,
          vhd2.header.blockSize * i
        )
      }
    }
  } finally {
    fs.close(recoveredFile)
  }
  const recovered = await getStream.buffer(
    await fs.createReadStream('recovered', {
      start: hugePositionBytes,
      end: hugePositionBytes + randomBuffer.length - 1,
    })
  )
  expect(recovered).toEqual(randomBuffer)
})

test('writeData on empty file', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(0, randomData)
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('writeData in 2 non-overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const splitPointSectors = 2
  await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
  await newVhd.writeData(
    splitPointSectors,
    randomData.slice(splitPointSectors * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('writeData in 2 overlaping operations', async () => {
  const mbOfRandom = 3
  await createRandomFile('randomfile', mbOfRandom)
  await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
  const randomData = await fs.readFile('randomfile')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'empty.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  const endFirstWrite = 3
  const startSecondWrite = 2
  await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
  await newVhd.writeData(
    startSecondWrite,
    randomData.slice(startSecondWrite * 512)
  )
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(randomData)
})

test('BAT can be extended and blocks moved', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works with empty parent files', async () => {
  const mbOfRandom = 2
  await createRandomFile('randomfile', mbOfRandom)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'empty.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works in normal cases', async () => {
  const mbOfRandom = 5
  await createRandomFile('randomfile', mbOfRandom)
  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'parent.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockAllocationTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()

  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockAllocationTable()
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
  await checkFile('parent.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
  await checkFile('parent.vhd')
  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
  await checkFile('child2.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
  await checkFile('parent.vhd')
  await recoverRawContent(
    'parent.vhd',
    'recovered_from_coalescing',
    originalSize
  )
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
    await fs.readFile('randomfile2')
  )
})

test('createSyntheticStream passes vhd-util check', async () => {
  const initalSize = 4
  await createRandomFile('randomfile', initalSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = getHandler({ url: 'file://' + process.cwd() })
  const stream = createReadStream(handler, 'randomfile.vhd')
  await fromEvent(
    stream.pipe(await fs.createWriteStream('recovered.vhd')),
    'finish'
  )
  await checkFile('recovered.vhd')
  await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})
77
packages/vhd-lib/src/merge.js
Normal file
@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'

import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'

// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
  parentHandler,
  parentPath,
  childHandler,
  childPath
) {
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

      const parentDiskType = parentVhd.footer.diskType
      assert(
        parentDiskType === DISK_TYPE_DIFFERENCING ||
          parentDiskType === DISK_TYPE_DYNAMIC
      )
      assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)

      // Read allocation table of child/parent.
      await Promise.all([
        parentVhd.readBlockAllocationTable(),
        childVhd.readBlockAllocationTable(),
      ])

      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }
})
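Usage, with the same call shape as the coalesce tests above. Note that the concurrency(2) decorator caps the number of merges running at once in the process:

// Coalesce the child's allocated blocks into the parent, in place.
// Resolves with the number of bytes of block data that were merged.
const mergedDataSize = await merge(handler, 'parent.vhd', handler, 'child.vhd')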
134
packages/vhd-lib/src/vhd.integ.spec.js
Normal file
@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'

import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'

const initialDir = process.cwd()

beforeEach(async () => {
  const dir = await pFromCallback(cb => tmp.dir(cb))
  process.chdir(dir)
})

afterEach(async () => {
  const tmpDir = process.cwd()
  process.chdir(initialDir)
  await pFromCallback(cb => rimraf(tmpDir, cb))
})

test('createFooter() does not crash', () => {
  createFooter(104448, Math.floor(Date.now() / 1000), {
    cylinders: 3,
    heads: 4,
    sectorsPerTrack: 17,
  })
})

test('ReadableRawVHDStream does not crash', async () => {
  const data = [
    {
      offsetBytes: 100,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 700,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  const fileSize = 1000
  const stream = createReadableRawVHDStream(fileSize, mockParser)
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})

test('ReadableRawVHDStream detects when blocks are out of order', async () => {
  const data = [
    {
      offsetBytes: 700,
      data: Buffer.from('azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: 100,
      data: Buffer.from('gdfslkdfguer', 'ascii'),
    },
  ]
  let index = 0
  const mockParser = {
    next: () => {
      if (index < data.length) {
        const result = data[index]
        index++
        return result
      } else {
        return null
      }
    },
  }
  return expect(
    new Promise((resolve, reject) => {
      const stream = createReadableRawVHDStream(100000, mockParser)
      stream.on('error', reject)
      const pipe = stream.pipe(createWriteStream('outputStream'))
      pipe.on('finish', resolve)
      pipe.on('error', reject)
    })
  ).rejects.toThrow('Received out of order blocks')
})

test('ReadableSparseVHDStream can handle a sparse file', async () => {
  const blockSize = Math.pow(2, 16)
  const blocks = [
    {
      offsetBytes: blockSize * 3,
      data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
    },
    {
      offsetBytes: blockSize * 5,
      data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
    },
  ]
  const fileSize = blockSize * 10
  const stream = createReadableSparseVHDStream(
    fileSize,
    blockSize,
    [100, 700],
    blocks
  )
  const pipe = stream.pipe(createWriteStream('output.vhd'))
  await fromEvent(pipe, 'finish')
  await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-O',
    'raw',
    'output.vhd',
    'out1.raw',
  ])
  const out1 = await readFile('out1.raw')
  const expected = Buffer.alloc(fileSize)
  blocks.forEach(b => {
    b.data.copy(expected, b.offsetBytes)
  })
  await expect(out1.slice(0, expected.length)).toEqual(expected)
})
631
packages/vhd-lib/src/vhd.js
Normal file
@@ -0,0 +1,631 @@
import assert from 'assert'
import getStream from 'get-stream'
import { fromEvent } from 'promise-toolbox'

import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
  BLOCK_UNUSED,
  DISK_TYPE_DIFFERENCING,
  DISK_TYPE_DYNAMIC,
  FILE_FORMAT_VERSION,
  FOOTER_COOKIE,
  FOOTER_SIZE,
  HEADER_COOKIE,
  HEADER_SIZE,
  HEADER_VERSION,
  PARENT_LOCATOR_ENTRIES,
  PLATFORM_NONE,
  PLATFORM_W2KU,
  SECTOR_SIZE,
} from './_constants'

const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
  ? str => console.log(`[vhd-merge]${str}`)
  : () => null

// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================

const computeBatSize = entries =>
  sectorsToBytes(sectorsRoundUpNoZero(entries * 4))

// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE

const assertChecksum = (name, buf, struct) => {
  const actual = unpackField(struct.fields.checksum, buf)
  const expected = checksumStruct(buf, struct)
  if (actual !== expected) {
    throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
  }
}

// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)

// ===================================================================

// Format:
//
// 1. Footer (512)
// 2. Header (1024)
// 3. Unordered entries
//    - BAT (batSize @ header.tableOffset)
//    - Blocks (@ blockOffset(i))
//      - bitmap (blockBitmapSize)
//      - data (header.blockSize)
//    - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
// 4. Footer (512 @ vhdSize - 512)
//
// Variables:
//
// - batSize = min(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
// - blockOffset(i) = bat[i] * sectorSize
// - nBlocks = ceil(footer.currentSize / header.blockSize)
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512

export default class Vhd {
  get batSize () {
    return computeBatSize(this.header.maxTableEntries)
  }

  constructor (handler, path) {
    this._handler = handler
    this._path = path
  }

  // =================================================================
  // Read functions.
  // =================================================================

  _readStream (start, n) {
    return this._handler.createReadStream(this._path, {
      start,
      end: start + n - 1, // end is inclusive
    })
  }

  _read (start, n) {
    return this._readStream(start, n)
      .then(getStream.buffer)
      .then(buf => {
        assert.equal(buf.length, n)
        return buf
      })
  }

  containsBlock (id) {
    return this._getBatEntry(id) !== BLOCK_UNUSED
  }

  // Returns the first address after metadata. (In bytes)
  getEndOfHeaders () {
    const { header } = this

    let end = FOOTER_SIZE + HEADER_SIZE

    // Max(end, block allocation table end)
    end = Math.max(end, header.tableOffset + this.batSize)

    for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
      const entry = header.parentLocatorEntry[i]

      if (entry.platformCode !== PLATFORM_NONE) {
        end = Math.max(
          end,
          entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
        )
      }
    }

    debug(`End of headers: ${end}.`)

    return end
  }

  // Returns the first sector after data.
  getEndOfData () {
    let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)

    const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
    const { maxTableEntries } = this.header
    for (let i = 0; i < maxTableEntries; i++) {
      const blockAddr = this._getBatEntry(i)

      if (blockAddr !== BLOCK_UNUSED) {
        end = Math.max(end, blockAddr + fullBlockSize)
      }
    }

    debug(`End of data: ${end}.`)

    return sectorsToBytes(end)
  }

  // TODO: extract the checks into reusable functions:
  // - better human reporting
  // - auto repair if possible
  async readHeaderAndFooter (checkSecondFooter = true) {
    const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
    const bufFooter = buf.slice(0, FOOTER_SIZE)
    const bufHeader = buf.slice(FOOTER_SIZE)

    assertChecksum('footer', bufFooter, fuFooter)
    assertChecksum('header', bufHeader, fuHeader)

    if (checkSecondFooter) {
      const size = await this._handler.getSize(this._path)
      assert(
        bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
        'footer1 !== footer2'
      )
    }

    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
    assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
    assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
    assert(footer.originalSize <= footer.currentSize)
    assert(
      footer.diskType === DISK_TYPE_DIFFERENCING ||
        footer.diskType === DISK_TYPE_DYNAMIC
    )

    const header = (this.header = fuHeader.unpack(bufHeader))
    assert.strictEqual(header.cookie, HEADER_COOKIE)
    assert.strictEqual(header.dataOffset, undefined)
    assert.strictEqual(header.headerVersion, HEADER_VERSION)
    assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
    assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
    const sectorsPerBlock = (this.sectorsPerBlock =
      header.blockSize / SECTOR_SIZE)

    // Compute bitmap size in sectors.
    // Default: 1.
    const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
      sectorsPerBlock >> 3
    ))

    // Full block size => data block size + bitmap size.
    this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)

    // In bytes.
    // Default: 512.
    this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
  }

  // Reads the block allocation table of the vhd file and caches it as a
  // buffer on the instance.
  async readBlockAllocationTable () {
    const { header } = this
    this.blockTable = await this._read(
      header.tableOffset,
      header.maxTableEntries * 4
    )
  }

  // return the first sector (bitmap) of a block
  _getBatEntry (block) {
    return this.blockTable.readUInt32BE(block * 4)
  }

  _readBlock (blockId, onlyBitmap = false) {
    const blockAddr = this._getBatEntry(blockId)
    if (blockAddr === BLOCK_UNUSED) {
      throw new Error(`no such block ${blockId}`)
    }

    return this._read(
      sectorsToBytes(blockAddr),
      onlyBitmap ? this.bitmapSize : this.fullBlockSize
    ).then(
      buf =>
        onlyBitmap
          ? { id: blockId, bitmap: buf }
          : {
              id: blockId,
              bitmap: buf.slice(0, this.bitmapSize),
              data: buf.slice(this.bitmapSize),
              buffer: buf,
            }
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  _getFirstAndLastBlocks () {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
    let i = 0
    let j = 0
    let first, firstSector, last, lastSector

    // get first allocated block for initialization
    while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
      i += 1
      j += 4

      if (i === n) {
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
    first = last = i

    while (i < n) {
      const sector = bat.readUInt32BE(j)
      if (sector !== BLOCK_UNUSED) {
        if (sector < firstSector) {
          first = i
          firstSector = sector
        } else if (sector > lastSector) {
          last = i
          lastSector = sector
        }
      }

      i += 1
      j += 4
    }

    return { first, firstSector, last, lastSector }
  }

  // =================================================================
  // Write functions.
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  async _write (data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    const stream = await this._handler.createOutputStream(this._path, {
      flags: 'r+',
      start: offset,
    })
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
  }

  async _freeFirstBlockSpace (spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(
        (tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
      )
      if (
        tableOffset + batSize + spaceNeededBytes >=
        sectorsToBytes(firstSector)
      ) {
        const { fullBlockSize } = this
        const newFirstSector = Math.max(
          lastSector + fullBlockSize / SECTOR_SIZE,
          newMinSector
        )
        debug(
          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
        )
        // copy the first block at the end
        const stream = await this._readStream(
          sectorsToBytes(firstSector),
          fullBlockSize
        )
        await this._write(stream, sectorsToBytes(newFirstSector))
        await this._setBatEntry(first, newFirstSector)
        await this.writeFooter(true)
        spaceNeededBytes -= this.fullBlockSize
        if (spaceNeededBytes > 0) {
          return this._freeFirstBlockSpace(spaceNeededBytes)
        }
      }
    } catch (e) {
      if (!e.noBlock) {
        throw e
      }
    }
  }

  async ensureBatSize (entries) {
    const { header } = this
    const prevMaxTableEntries = header.maxTableEntries
    if (prevMaxTableEntries >= entries) {
      return
    }

    const newBatSize = computeBatSize(entries)
    await this._freeFirstBlockSpace(newBatSize - this.batSize)
    const maxTableEntries = (header.maxTableEntries = entries)
    const prevBat = this.blockTable
    const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
    prevBat.copy(bat)
    bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
    debug(
      `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
    )
    await this._write(
      constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
      header.tableOffset + prevBat.length
    )
    await this.writeHeader()
  }

  // set the first sector (bitmap) of a block
  _setBatEntry (block, blockSector) {
    const i = block * 4
    const { blockTable } = this

    blockTable.writeUInt32BE(blockSector, i)

    return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
  }

  // Make a new empty block at vhd end.
  // Update block allocation table in context and in file.
  async createBlock (blockId) {
    const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)

    debug(`create block ${blockId} at ${blockAddr}`)

    await Promise.all([
      // Write an empty block and addr in vhd file.
      this._write(
        constantStream([0], this.fullBlockSize),
        sectorsToBytes(blockAddr)
      ),

      this._setBatEntry(blockId, blockAddr),
    ])

    return blockAddr
  }

  // Write a bitmap at a block address.
  async writeBlockBitmap (blockAddr, bitmap) {
    const { bitmapSize } = this

    if (bitmap.length !== bitmapSize) {
      throw new Error(`Bitmap length is not correct ! ${bitmap.length}`)
    }

    const offset = sectorsToBytes(blockAddr)

    debug(
      `Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
        'hex'
      )})`
    )
    await this._write(bitmap, sectorsToBytes(blockAddr))
  }

  async writeEntireBlock (block) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }
    await this._write(block.buffer, sectorsToBytes(blockAddr))
  }

  async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
      parentBitmap = Buffer.alloc(this.bitmapSize, 0)
    } else if (parentBitmap === undefined) {
      parentBitmap = (await this._readBlock(block.id, true)).bitmap
    }

    const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

    debug(
      `writeBlockSectors at ${offset} block=${
        block.id
      }, sectors=${beginSectorId}...${endSectorId}`
    )

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(parentBitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, parentBitmap)
    await this._write(
      block.data.slice(
        sectorsToBytes(beginSectorId),
        sectorsToBytes(endSectorId)
      ),
      sectorsToBytes(offset)
    )
  }

  async coalesceBlock (child, blockId) {
    const block = await child._readBlock(blockId)
    const { bitmap, data } = block

    debug(`coalesceBlock block=${blockId}`)

    // For each sector of block data...
    const { sectorsPerBlock } = child
    for (let i = 0; i < sectorsPerBlock; i++) {
      // If no changes on one sector, skip.
      if (!mapTestBit(bitmap, i)) {
        continue
      }
      let parentBitmap = null
      let endSector = i + 1

      // Count changed sectors.
      while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
        ++endSector
      }

      // Write n sectors into parent.
      debug(`coalesceBlock: write sectors=${i}...${endSector}`)

      const isFullBlock = i === 0 && endSector === sectorsPerBlock
      if (isFullBlock) {
        await this.writeEntireBlock(block)
      } else {
        if (parentBitmap === null) {
          parentBitmap = (await this._readBlock(blockId, true)).bitmap
        }
        await this.writeBlockSectors(block, i, endSector, parentBitmap)
      }

      i = endSector
    }

    // Return the merged data size
    return data.length
  }

  // Write a context footer. (At the end and beginning of a vhd file.)
  async writeFooter (onlyEndFooter = false) {
    const { footer } = this

    const rawFooter = fuFooter.pack(footer)
    const eof = await this._handler.getSize(this._path)
    // sometimes the file is longer than anticipated, we still need to put the footer at the end
    const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(
      `Write footer at: ${offset} (checksum=${
        footer.checksum
      }). (data=${rawFooter.toString('hex')})`
    )
    if (!onlyEndFooter) {
      await this._write(rawFooter, 0)
    }
    await this._write(rawFooter, offset)
  }

  writeHeader () {
    const { header } = this
    const rawHeader = fuHeader.pack(header)
    header.checksum = checksumStruct(rawHeader, fuHeader)
    const offset = FOOTER_SIZE
    debug(
      `Write header at: ${offset} (checksum=${
        header.checksum
      }). (data=${rawHeader.toString('hex')})`
    )
    return this._write(rawHeader, offset)
  }

  async writeData (offsetSectors, buffer) {
    const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
    const endBufferSectors = offsetSectors + bufferSizeSectors
    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
    await this.ensureBatSize(lastBlock)
    const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock

    for (
      let currentBlock = startBlock;
      currentBlock <= lastBlock;
      currentBlock++
    ) {
      const offsetInBlockSectors = Math.max(
        0,
        offsetSectors - currentBlock * this.sectorsPerBlock
      )
      const endInBlockSectors = Math.min(
        endBufferSectors - currentBlock * this.sectorsPerBlock,
        this.sectorsPerBlock
      )
      const startInBuffer = Math.max(
        0,
        (currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
      )
      const endInBuffer = Math.min(
        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
          SECTOR_SIZE,
        buffer.length
      )
      let inputBuffer
      if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
        inputBuffer = buffer.slice(startInBuffer, endInBuffer)
      } else {
        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
        buffer.copy(
          inputBuffer,
          offsetInBlockSectors * SECTOR_SIZE,
          startInBuffer,
          endInBuffer
        )
      }
      await this.writeBlockSectors(
        { id: currentBlock, data: inputBuffer },
        offsetInBlockSectors,
        endInBlockSectors
      )
    }
    await this.writeFooter()
  }

  async ensureSpaceForParentLocators (neededSectors) {
    const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
    const currentSpace =
      Math.floor(this.header.tableOffset / SECTOR_SIZE) -
      firstLocatorOffset / SECTOR_SIZE
    if (currentSpace < neededSectors) {
      const deltaSectors = neededSectors - currentSpace
      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
      this.header.tableOffset += sectorsToBytes(deltaSectors)
      await this._write(this.blockTable, this.header.tableOffset)
    }
    return firstLocatorOffset
  }

  async setUniqueParentLocator (fileNameString) {
    const { header } = this
    header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
    const encodedFilename = Buffer.from(fileNameString, 'utf16le')
    const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
    const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
    await this._write(encodedFilename, position)
    header.parentLocatorEntry[0].platformDataSpace =
      dataSpaceSectors * SECTOR_SIZE
    header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
    header.parentLocatorEntry[0].platformDataOffset = position
    for (let i = 1; i < 8; i++) {
      header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
      header.parentLocatorEntry[i].platformDataSpace = 0
      header.parentLocatorEntry[i].platformDataLength = 0
      header.parentLocatorEntry[i].platformDataOffset = 0
    }
  }
}
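A reading-side sketch tying the class together, following the call sequence the tests use (header and footer first, then the BAT, then blocks):

// Open a VHD, dump a few fields, and read its first allocated block.
const vhd = new Vhd(handler, 'disk.vhd')
await vhd.readHeaderAndFooter()
console.log(vhd.footer.currentSize, vhd.header.blockSize)
await vhd.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; ++i) {
  if (vhd.containsBlock(i)) {
    const { bitmap, data } = await vhd._readBlock(i)
    console.log(`block ${i}: ${data.length} data bytes, ${bitmap.length} bitmap bytes`)
    break
  }
}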
@@ -1,6 +1,6 @@
 {
   "name": "xen-api",
-  "version": "0.16.5",
+  "version": "0.16.9",
   "license": "ISC",
   "description": "Connector to the Xen API",
   "keywords": [
@@ -143,7 +143,9 @@ export const isOpaqueRef = value =>
 
 const RE_READ_ONLY_METHOD = /^[^.]+\.get_/
 const isReadOnlyCall = (method, args) =>
-  args.length === 1 && isOpaqueRef(args[0]) && RE_READ_ONLY_METHOD.test(method)
+  args.length === 1 &&
+  typeof args[0] === 'string' &&
+  RE_READ_ONLY_METHOD.test(method)
 
 // Prepare values before passing them to the XenAPI:
 //
@@ -180,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const getTaskResult = (task, onSuccess, onFailure) => {
|
||||
const getTaskResult = task => {
|
||||
const { status } = task
|
||||
if (status === 'cancelled') {
|
||||
return [onFailure(new Cancel('task canceled'))]
|
||||
return Promise.reject(new Cancel('task canceled'))
|
||||
}
|
||||
if (status === 'failure') {
|
||||
return [onFailure(wrapError(task.error_info))]
|
||||
return Promise.reject(wrapError(task.error_info))
|
||||
}
|
||||
if (status === 'success') {
|
||||
// the result might be:
|
||||
// - empty string
|
||||
// - an opaque reference
|
||||
// - an XML-RPC value
|
||||
return [onSuccess(task.result)]
|
||||
return Promise.resolve(task.result)
|
||||
}
|
||||
}
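After this change `getTaskResult` returns a promise once the task has settled and `undefined` while it is still pending, which is exactly what the call sites further down rely on. A sketch of how a caller consumes it (`task` being a XAPI task record):

```js
// Sketch only: getTaskResult as defined above.
const result = getTaskResult(task)
if (result !== undefined) {
  // settled: a resolved promise on success, a rejected one on cancel/failure
  result.then(
    value => console.log('task value:', value),
    error => console.error('task failed:', error)
  )
} else {
  // still pending: keep watching the task via the event stream
}
```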
@@ -244,7 +246,7 @@ export class Xapi extends EventEmitter {
    objects.getKey = getKey

    this._objectsByRefs = createObject(null)
    this._objectsByRefs['OpaqueRef:NULL'] = null
    this._objectsByRefs['OpaqueRef:NULL'] = undefined

    this._taskWatchers = Object.create(null)

@@ -407,22 +409,22 @@ export class Xapi extends EventEmitter {
    return this._readOnly && !isReadOnlyCall(method, args)
      ? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
      : this._sessionCall(`Async.${method}`, args).then(taskRef => {
        $cancelToken.promise.then(() => {
          this._sessionCall('task.cancel', [taskRef]).catch(noop)
        })
        $cancelToken.promise.then(() => {
          // TODO: do not trigger if the task is already over
          this._sessionCall('task.cancel', [taskRef]).catch(noop)
        })

        return this.watchTask(taskRef)::lastly(() => {
          this._sessionCall('task.destroy', [taskRef]).catch(noop)
        return this.watchTask(taskRef)::lastly(() => {
          this._sessionCall('task.destroy', [taskRef]).catch(noop)
        })
        })
      })
  }

  // create a task and automatically destroy it when settled
  //
  // allowed even in read-only mode because it does not have impact on the
  // XenServer and it's necessary for getResource()
  createTask (nameLabel, nameDescription = '') {
    if (this._readOnly) {
      return Promise.reject(new Error('cannot create task in read only mode'))
    }

    const promise = this._sessionCall('task.create', [
      nameLabel,
      nameDescription,

@@ -441,16 +443,18 @@ export class Xapi extends EventEmitter {
  // this lib), UUID (unique identifier that some objects have) or
  // opaque reference (internal to XAPI).
  getObject (idOrUuidOrRef, defaultValue) {
    const object =
      typeof idOrUuidOrRef === 'string'
        ? this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
        : this._objects.all[idOrUuidOrRef.$id]
    if (typeof idOrUuidOrRef === 'object') {
      idOrUuidOrRef = idOrUuidOrRef.$id
    }

    if (object) return object
    const object =
      this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]

    if (object !== undefined) return object

    if (arguments.length > 1) return defaultValue

    throw new Error('there is not object can be matched to ' + idOrUuidOrRef)
    throw new Error('no object with UUID or opaque ref: ' + idOrUuidOrRef)
  }
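Both the lookup rewrite and the new `undefined` sentinel show up in client code like this (sketch; `xapi` is a connected instance and the UUID is a placeholder):

```js
// Throws if nothing matches:
const vm = xapi.getObject('b7569d99-30f8-178a-7d94-801de3e29b5b')

// Returns the default instead of throwing:
const maybeVm = xapi.getObject(someRef, undefined)

// 'OpaqueRef:NULL' now maps to undefined, so a null ref falls through to
// the default value instead of being returned as a null that truthiness
// checks used to mishandle.
```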
  // Returns the object for a given opaque reference (internal to

@@ -458,11 +462,11 @@ export class Xapi extends EventEmitter {
  getObjectByRef (ref, defaultValue) {
    const object = this._objectsByRefs[ref]

    if (object) return object
    if (object !== undefined) return object

    if (arguments.length > 1) return defaultValue

    throw new Error('there is no object with the ref ' + ref)
    throw new Error('no object with opaque ref: ' + ref)
  }

  // Returns the object for a given UUID (unique identifier that some

@@ -475,7 +479,7 @@ export class Xapi extends EventEmitter {

    if (arguments.length > 1) return defaultValue

    throw new Error('there is no object with the UUID ' + uuid)
    throw new Error('no object with UUID: ' + uuid)
  }

  getRecord (type, ref) {

@@ -575,31 +579,31 @@ export class Xapi extends EventEmitter {
    // redirection before consuming body
    const promise = isStream
      ? doRequest({
        body: '',
        body: '',

        // omit task_id because this request will fail on purpose
        query: 'task_id' in query ? omit(query, 'task_id') : query,
        // omit task_id because this request will fail on purpose
        query: 'task_id' in query ? omit(query, 'task_id') : query,

        maxRedirects: 0,
      }).then(
        response => {
          response.req.abort()
          return doRequest()
        },
        error => {
          let response
          if (error != null && (response = error.response) != null) {
        maxRedirects: 0,
      }).then(
        response => {
          response.req.abort()
          return doRequest()
        },
        error => {
          let response
          if (error != null && (response = error.response) != null) {
            response.req.abort()

            const { headers: { location }, statusCode } = response
            if (statusCode === 302 && location !== undefined) {
              return doRequest(location)
            const { headers: { location }, statusCode } = response
            if (statusCode === 302 && location !== undefined) {
              return doRequest(location)
            }
          }
        }

          throw error
        }
      )
          throw error
        }
      )
      : doRequest()

    return promise.then(response => {

@@ -638,11 +642,11 @@ export class Xapi extends EventEmitter {
    let watcher = watchers[ref]
    if (watcher === undefined) {
      // sync check if the task is already settled
      const task = this.objects.all[ref]
      const task = this._objectsByRefs[ref]
      if (task !== undefined) {
        const result = getTaskResult(task, Promise.resolve, Promise.reject)
        if (result) {
          return result[0]
        const result = getTaskResult(task)
        if (result !== undefined) {
          return result
        }
      }

@@ -789,11 +793,12 @@ export class Xapi extends EventEmitter {

    const taskWatchers = this._taskWatchers
    const taskWatcher = taskWatchers[ref]
    if (
      taskWatcher !== undefined &&
      getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
    ) {
      delete taskWatchers[ref]
    if (taskWatcher !== undefined) {
      const result = getTaskResult(object)
      if (result !== undefined) {
        taskWatcher.resolve(result)
        delete taskWatchers[ref]
      }
    }
  }
}

@@ -813,7 +818,10 @@ export class Xapi extends EventEmitter {
    const taskWatchers = this._taskWatchers
    const taskWatcher = taskWatchers[ref]
    if (taskWatcher !== undefined) {
      taskWatcher.reject(new Error('task has been destroyed before completion'))
      const error = new Error('task has been destroyed before completion')
      error.task = object
      error.taskRef = ref
      taskWatcher.reject(error)
      delete taskWatchers[ref]
    }
  }

@@ -1,47 +1,3 @@
'use strict'

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const pkg = require('./package')

const plugins = {
  lodash: {},
}

const presets = {
  '@babel/preset-env': {
    debug: !__TEST__,
    loose: true,
    shippedProposals: true,
    targets: __PROD__
      ? (() => {
          let node = (pkg.engines || {}).node
          if (node !== undefined) {
            const trimChars = '^=>~'
            while (trimChars.includes(node[0])) {
              node = node.slice(1)
            }
            return { node: node }
          }
        })()
      : { browsers: '', node: 'current' },
    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
  },
}

Object.keys(pkg.devDependencies || {}).forEach(name => {
  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
    plugins[name] = {}
  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
    presets[name] = {}
  }
})

module.exports = {
  comments: !__PROD__,
  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
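The per-package babel config is replaced by a call to a shared factory whose contents are not part of this diff. A plausible shape, assuming it simply centralizes the logic that was just deleted (a hypothetical reconstruction, not the actual module):

```js
// @xen-orchestra/babel-config/index.js — hypothetical sketch only.
module.exports = function makeBabelConfig (pkg) {
  const NODE_ENV = process.env.NODE_ENV || 'development'
  const __PROD__ = NODE_ENV === 'production'

  // Presumably derives targets/plugins from pkg, as the deleted code did.
  return {
    comments: !__PROD__,
    presets: [['@babel/preset-env', { loose: true, shippedProposals: true }]],
    plugins: ['lodash'],
  }
}
```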
@@ -105,6 +105,12 @@ encoding by prefixing with `json:`:
> xo-cli foo.bar baz='json:[1, 2, 3]'
```

##### Configuration export

```
> xo-cli xo.exportConfig @=config.json
```

##### VM export

```

@@ -28,7 +28,7 @@
    "node": ">=6"
  },
  "dependencies": {
    "@babel/polyfill": "7.0.0-beta.40",
    "@babel/polyfill": "7.0.0-beta.44",
    "bluebird": "^3.5.1",
    "chalk": "^2.2.0",
    "event-to-promise": "^0.8.0",

@@ -49,10 +49,10 @@
    "xo-lib": "^0.9.0"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.40",
    "@babel/core": "7.0.0-beta.40",
    "@babel/preset-env": "7.0.0-beta.40",
    "@babel/preset-flow": "7.0.0-beta.40",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"

@@ -62,7 +62,6 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "pretest": "flow status"
    "prepublishOnly": "yarn run build"
  }
}

@@ -328,6 +328,15 @@ async function listObjects (args) {
}
exports.listObjects = listObjects

function ensurePathParam (method, value) {
  if (typeof value !== 'string') {
    const error =
      method +
      ' requires the @ parameter to be a path (e.g. @=/tmp/config.json)'
    throw error
  }
}
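Note that `ensurePathParam` throws a plain string rather than an `Error`; the handlers below only need the call to abort. A quick usage sketch:

```js
// Sketch: the guard used by the $getFrom/$sendTo handlers below.
try {
  ensurePathParam('xo.exportConfig', undefined) // missing @=<path>
} catch (message) {
  // message is the plain string thrown above:
  // "xo.exportConfig requires the @ parameter to be a path (e.g. @=/tmp/config.json)"
  console.error(message)
}
```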
async function call (args) {
  if (!args.length) {
    throw new Error('missing command name')

@@ -350,6 +359,7 @@ async function call (args) {
  key = keys[0]

  if (key === '$getFrom') {
    ensurePathParam(method, file)
    url = resolveUrl(baseUrl, result[key])
    const output = createWriteStream(file)

@@ -371,6 +381,7 @@ async function call (args) {
  }

  if (key === '$sendTo') {
    ensurePathParam(method, file)
    url = resolveUrl(baseUrl, result[key])

    const stats = await stat(file)

@@ -40,7 +40,7 @@
    "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
    "prebuild": "rimraf dist/",
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build"
    "prepare": "yarn run build"
  },
  "babel": {
    "plugins": [

@@ -1,6 +1,6 @@
{
  "name": "xo-server-auth-saml",
  "version": "0.5.0",
  "version": "0.5.2",
  "license": "AGPL-3.0",
  "description": "SAML authentication plugin for XO-Server",
  "keywords": [

@@ -3,22 +3,32 @@ import { Strategy } from 'passport-saml'
// ===================================================================

export const configurationSchema = {
  description:
    'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
  type: 'object',
  properties: {
    cert: {
      title: 'Certificate',
      description: "Copy/paste the identity provider's certificate",
      type: 'string',
    },
    entryPoint: {
      title: 'Entry point',
      description: 'Entry point of the identity provider',
      type: 'string',
    },
    issuer: {
      title: 'Issuer',
      description: 'Issuer string to supply to the identity provider',
      type: 'string',
    },
    usernameField: {
      title: 'Username field',
      description: 'Field to use as the XO username',
      type: 'string',
    },
  },
  required: ['cert', 'entryPoint', 'issuer'],
  required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
}
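A configuration object satisfying the updated schema, with `usernameField` now required (all values are placeholders, not defaults prescribed by the plugin):

```js
// Placeholder values — only the shape comes from the schema above.
const samlConfiguration = {
  cert: '-----BEGIN CERTIFICATE-----\n…\n-----END CERTIFICATE-----',
  entryPoint: 'https://idp.example.org/sso/saml',
  issuer: 'xen-orchestra',
  usernameField: 'uid', // which profile attribute becomes the XO username
}
```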
// ===================================================================

@@ -42,6 +52,7 @@ class AuthSamlXoPlugin {
      new Strategy(this._conf, async (profile, done) => {
        const name = profile[this._usernameField]
        if (!name) {
          console.warn('xo-server-auth-saml:', profile)
          done('no name found for this user')
          return
        }

@@ -1,6 +1,6 @@
{
  "name": "xo-server-backup-reports",
  "version": "0.9.1",
  "version": "0.11.0",
  "license": "AGPL-3.0",
  "description": "Backup reports plugin for XO-Server",
  "keywords": [
@@ -35,6 +35,7 @@
    "node": ">=4"
  },
  "dependencies": {
    "babel-runtime": "^6.26.0",
    "human-format": "^0.10.0",
    "lodash": "^4.13.1",
    "moment-timezone": "^0.5.13"
@@ -42,6 +43,7 @@
  "devDependencies": {
    "babel-cli": "^6.24.1",
    "babel-plugin-lodash": "^3.3.2",
    "babel-plugin-transform-runtime": "^6.23.0",
    "babel-preset-env": "^1.5.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.1"
@@ -56,7 +58,8 @@
  },
  "babel": {
    "plugins": [
      "lodash"
      "lodash",
      "transform-runtime"
    ],
    "presets": [
      [

@@ -1,6 +1,6 @@
import humanFormat from 'human-format'
import moment from 'moment-timezone'
import { forEach, startCase } from 'lodash'
import { find, forEach, get, startCase } from 'lodash'

import pkg from '../package'

@@ -41,9 +41,9 @@ const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormater = timezone =>
  timezone !== undefined
    ? timestamp =>
      moment(timestamp)
        .tz(timezone)
        .format(DATE_FORMAT)
      moment(timestamp)
        .tz(timezone)
        .format(DATE_FORMAT)
    : timestamp => moment(timestamp).format(DATE_FORMAT)

const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
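`formatSpeed` is used below next to `formatSize`, but its definition sits outside this excerpt. A plausible implementation built on the same `human-format` dependency imported above (an assumption, not the actual code):

```js
// Hypothetical reconstruction: bytes over a duration in ms, shown per second.
const formatSpeed = (bytes, milliseconds) =>
  humanFormat(bytes * 1e3 / milliseconds, { scale: 'binary', unit: 'B/s' })

// formatSpeed(1073741824, 2000) → '512 MiB/s'
```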
@@ -66,6 +66,7 @@ const logError = e => {
  console.error('backup report error:', e)
}

const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
const UNHEALTHY_VDI_CHAIN_MESSAGE =

@@ -94,14 +95,351 @@ class BackupReportsXoPlugin {
    this._xo.removeListener('job:terminated', this._report)
  }

  _wrapper (status) {
    return new Promise(resolve => resolve(this._listener(status))).catch(
      logError
    )
  _wrapper (status, job, schedule) {
    return new Promise(resolve =>
      resolve(
        job.type === 'backup'
          ? this._backupNgListener(status, job, schedule)
          : this._listener(status, job, schedule)
      )
    ).catch(logError)
  }

  async _backupNgListener (runJobId, _, { timezone }) {
    const xo = this._xo
    const logs = await xo.getBackupNgLogs(runJobId)
    const jobLog = logs['roots'][0]
    const vmsTaskLog = logs[jobLog.id]

    const { reportWhen, mode } = jobLog.data || {}
    if (reportWhen === 'never') {
      return
    }

    const formatDate = createDateFormater(timezone)
    const jobName = (await xo.getJob(jobLog.jobId, 'backup')).name

    if (jobLog.error !== undefined) {
      const [globalStatus, icon] =
        jobLog.error.message === NO_VMS_MATCH_THIS_PATTERN
          ? ['Skipped', ICON_SKIPPED]
          : ['Failure', ICON_FAILURE]
      let markdown = [
        `## Global status: ${globalStatus}`,
        '',
        `- **mode**: ${mode}`,
        `- **Start time**: ${formatDate(jobLog.start)}`,
        `- **End time**: ${formatDate(jobLog.end)}`,
        `- **Duration**: ${formatDuration(jobLog.duration)}`,
        `- **Error**: ${jobLog.error.message}`,
        '---',
        '',
        `*${pkg.name} v${pkg.version}*`,
      ]

      markdown = markdown.join('\n')
      return this._sendReport({
        subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${icon}`,
        markdown,
        nagiosStatus: 2,
        nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Backup report for ${jobName} - Error : ${
          jobLog.error.message
        }`,
      })
    }

    const failedVmsText = []
    const skippedVmsText = []
    const successfulVmsText = []
    const nagiosText = []

    let globalMergeSize = 0
    let globalTransferSize = 0
    let nFailures = 0
    let nSkipped = 0

    for (const vmTaskLog of vmsTaskLog || []) {
      const vmTaskStatus = vmTaskLog.status
      if (vmTaskStatus === 'success' && reportWhen === 'failure') {
        return
      }

      const vmId = vmTaskLog.data.id
      let vm
      try {
        vm = xo.getObject(vmId)
      } catch (e) {}
      const text = [
        `### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
        '',
        `- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
        `- **Start time**: ${formatDate(vmTaskLog.start)}`,
        `- **End time**: ${formatDate(vmTaskLog.end)}`,
        `- **Duration**: ${formatDuration(vmTaskLog.duration)}`,
      ]

      const failedSubTasks = []
      const operationsText = []
      const srsText = []
      const remotesText = []
      for (const subTaskLog of logs[vmTaskLog.taskId] || []) {
        const { data, status, result, message } = subTaskLog
        const icon =
          subTaskLog.status === 'success' ? ICON_SUCCESS : ICON_FAILURE
        const errorMessage = ` **Error**: ${get(result, 'message')}`

        if (message === 'snapshot') {
          operationsText.push(`- **Snapshot** ${icon}`)
          if (status === 'failure') {
            failedSubTasks.push('Snapshot')
            operationsText.push('', errorMessage)
          }
        } else if (data.type === 'remote') {
          const remoteId = data.id
          const remote = await xo.getRemote(remoteId).catch(() => {})
          remotesText.push(
            `- **${
              remote !== undefined ? remote.name : `Remote Not found`
            }** (${remoteId}) ${icon}`
          )
          if (status === 'failure') {
            failedSubTasks.push(remote !== undefined ? remote.name : remoteId)
            remotesText.push('', errorMessage)
          }
        } else {
          const srId = data.id
          let sr
          try {
            sr = xo.getObject(srId)
          } catch (e) {}
          const [srName, srUuid] =
            sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, srId]
          srsText.push(`- **${srName}** (${srUuid}) ${icon}`)
          if (status === 'failure') {
            failedSubTasks.push(sr !== undefined ? sr.name_label : srId)
            srsText.push('', errorMessage)
          }
        }
      }

      if (operationsText.length !== 0) {
        operationsText.unshift(`#### Operations`, '')
      }
      if (srsText.length !== 0) {
        srsText.unshift(`#### SRs`, '')
      }
      if (remotesText.length !== 0) {
        remotesText.unshift(`#### remotes`, '')
      }
      const subText = [...operationsText, '', ...srsText, '', ...remotesText]
      const result = vmTaskLog.result
      if (vmTaskStatus === 'failure' && result !== undefined) {
        const { message } = result
        if (isSkippedError(result)) {
          ++nSkipped
          skippedVmsText.push(
            ...text,
            `- **Reason**: ${
              message === UNHEALTHY_VDI_CHAIN_ERROR
                ? UNHEALTHY_VDI_CHAIN_MESSAGE
                : message
            }`,
            ''
          )
          nagiosText.push(
            `[(Skipped) ${
              vm !== undefined ? vm.name_label : 'undefined'
            } : ${message} ]`
          )
        } else {
          ++nFailures
          failedVmsText.push(...text, `- **Error**: ${message}`, '')

          nagiosText.push(
            `[(Failed) ${
              vm !== undefined ? vm.name_label : 'undefined'
            } : ${message} ]`
          )
        }
      } else {
        let transferSize, transferDuration, mergeSize, mergeDuration

        forEach(logs[vmTaskLog.taskId], ({ taskId }) => {
          if (transferSize !== undefined) {
            return false
          }

          const transferTask = find(logs[taskId], { message: 'transfer' })
          if (transferTask !== undefined) {
            transferSize = transferTask.result.size
            transferDuration = transferTask.end - transferTask.start
          }

          const mergeTask = find(logs[taskId], { message: 'merge' })
          if (mergeTask !== undefined) {
            mergeSize = mergeTask.result.size
            mergeDuration = mergeTask.end - mergeTask.start
          }
        })
        if (transferSize !== undefined) {
          globalTransferSize += transferSize
          text.push(
            `- **Transfer size**: ${formatSize(transferSize)}`,
            `- **Transfer speed**: ${formatSpeed(
              transferSize,
              transferDuration
            )}`
          )
        }
        if (mergeSize !== undefined) {
          globalMergeSize += mergeSize
          text.push(
            `- **Merge size**: ${formatSize(mergeSize)}`,
            `- **Merge speed**: ${formatSpeed(mergeSize, mergeDuration)}`
          )
        }
        if (vmTaskStatus === 'failure') {
          ++nFailures
          failedVmsText.push(...text, '', '', ...subText, '')
          nagiosText.push(
            `[(Failed) ${
              vm !== undefined ? vm.name_label : 'undefined'
            }: (failed)[${failedSubTasks.toString()}]]`
          )
        } else {
          successfulVmsText.push(...text, '', '', ...subText, '')
        }
      }
    }
    const globalSuccess = nFailures === 0 && nSkipped === 0
    if (reportWhen === 'failure' && globalSuccess) {
      return
    }

    const nVms = vmsTaskLog.length
    const nSuccesses = nVms - nFailures - nSkipped
    const globalStatus = globalSuccess
      ? `Success`
      : nFailures !== 0 ? `Failure` : `Skipped`
    let markdown = [
      `## Global status: ${globalStatus}`,
      '',
      `- **mode**: ${mode}`,
      `- **Start time**: ${formatDate(jobLog.start)}`,
      `- **End time**: ${formatDate(jobLog.end)}`,
      `- **Duration**: ${formatDuration(jobLog.duration)}`,
      `- **Successes**: ${nSuccesses} / ${nVms}`,
    ]

    if (globalTransferSize !== 0) {
      markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
    }
    if (globalMergeSize !== 0) {
      markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
    }
    markdown.push('')

    if (nFailures !== 0) {
      markdown.push(
        '---',
        '',
        `## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
        '',
        ...failedVmsText
      )
    }

    if (nSkipped !== 0) {
      markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
    }

    if (nSuccesses !== 0 && reportWhen !== 'failure') {
      markdown.push(
        '---',
        '',
        `## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
        '',
        ...successfulVmsText
      )
    }

    markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
    markdown = markdown.join('\n')
    return this._sendReport({
      markdown,
      subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${
        globalSuccess
          ? ICON_SUCCESS
          : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
      }`,
      nagiosStatus: globalSuccess ? 0 : 2,
      nagiosMarkdown: globalSuccess
        ? `[Xen Orchestra] [Success] Backup report for ${jobName}`
        : `[Xen Orchestra] [${
            nFailures !== 0 ? 'Failure' : 'Skipped'
          }] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
    })
  }

  _sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
    const xo = this._xo
    return Promise.all([
      xo.sendEmail !== undefined &&
        xo.sendEmail({
          to: this._mailsReceivers,
          subject,
          markdown,
        }),
      xo.sendToXmppClient !== undefined &&
        xo.sendToXmppClient({
          to: this._xmppReceivers,
          message: markdown,
        }),
      xo.sendSlackMessage !== undefined &&
        xo.sendSlackMessage({
          message: markdown,
        }),
      xo.sendPassiveCheck !== undefined &&
        xo.sendPassiveCheck({
          nagiosStatus,
          message: nagiosMarkdown,
        }),
    ])
  }
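`_sendReport` fans one report out to every transport another plugin may have registered (email, XMPP, Slack, Nagios passive checks). A transport that is not loaded leaves `false` in the array, which `Promise.all` resolves untouched. In miniature:

```js
// Minimal sketch of the optional-transport pattern used by _sendReport.
const sendEmail = undefined // transport not loaded

Promise.all([
  sendEmail !== undefined && sendEmail('report'), // short-circuits to false
  Promise.resolve('sent to Slack'),
]).then(results => console.log(results)) // → [ false, 'sent to Slack' ]
```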
  _listener (status) {
    const { calls } = status
    const { calls, timezone, error } = status
    const formatDate = createDateFormater(timezone)

    if (status.error !== undefined) {
      const [globalStatus, icon] =
        error.message === NO_VMS_MATCH_THIS_PATTERN
          ? ['Skipped', ICON_SKIPPED]
          : ['Failure', ICON_FAILURE]

      let markdown = [
        `## Global status: ${globalStatus}`,
        '',
        `- **Start time**: ${formatDate(status.start)}`,
        `- **End time**: ${formatDate(status.end)}`,
        `- **Duration**: ${formatDuration(status.end - status.start)}`,
        `- **Error**: ${error.message}`,
        '---',
        '',
        `*${pkg.name} v${pkg.version}*`,
      ]

      markdown = markdown.join('\n')
      return this._sendReport({
        subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
        markdown,
        nagiosStatus: 2,
        nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
          error.message
        }`,
      })
    }

    const callIds = Object.keys(calls)

    const nCalls = callIds.length

@@ -139,8 +477,6 @@ class BackupReportsXoPlugin {
    const skippedBackupsText = []
    const successfulBackupText = []

    const formatDate = createDateFormater(status.timezone)

    forEach(calls, call => {
      const { id = call.params.vm } = call.params

@@ -226,9 +562,8 @@ class BackupReportsXoPlugin {
      return
    }

    const { end, start } = status
    const { tag } = oneCall.params
    const duration = end - start
    const duration = status.end - status.start
    const nSuccesses = nCalls - nFailures - nSkipped
    const globalStatus = globalSuccess
      ? `Success`

@@ -238,8 +573,8 @@ class BackupReportsXoPlugin {
      `## Global status: ${globalStatus}`,
      '',
      `- **Type**: ${formatMethod(method)}`,
      `- **Start time**: ${formatDate(start)}`,
      `- **End time**: ${formatDate(end)}`,
      `- **Start time**: ${formatDate(status.start)}`,
      `- **End time**: ${formatDate(status.end)}`,
      `- **Duration**: ${formatDuration(duration)}`,
      `- **Successes**: ${nSuccesses} / ${nCalls}`,
    ]

@@ -285,37 +620,20 @@ class BackupReportsXoPlugin {

    markdown = markdown.join('\n')

    const xo = this._xo
    return Promise.all([
      xo.sendEmail !== undefined &&
        xo.sendEmail({
          to: this._mailsReceivers,
          subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
            globalSuccess
              ? ICON_SUCCESS
              : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
          }`,
          markdown,
        }),
      xo.sendToXmppClient !== undefined &&
        xo.sendToXmppClient({
          to: this._xmppReceivers,
          message: markdown,
        }),
      xo.sendSlackMessage !== undefined &&
        xo.sendSlackMessage({
          message: markdown,
        }),
      xo.sendPassiveCheck !== undefined &&
        xo.sendPassiveCheck({
          status: globalSuccess ? 0 : 2,
          message: globalSuccess
            ? `[Xen Orchestra] [Success] Backup report for ${tag}`
            : `[Xen Orchestra] [${
                nFailures !== 0 ? 'Failure' : 'Skipped'
              }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
        }),
    ])
    return this._sendReport({
      markdown,
      subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${
        globalSuccess
          ? ICON_SUCCESS
          : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
      }`,
      nagiosStatus: globalSuccess ? 0 : 2,
      nagiosMarkdown: globalSuccess
        ? `[Xen Orchestra] [Success] Backup report for ${tag}`
        : `[Xen Orchestra] [${
            nFailures !== 0 ? 'Failure' : 'Skipped'
          }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
    })
  }
}


@@ -128,10 +128,15 @@ class XoServerCloud {
      throw new Error(`cannot get resource: ${namespace} not registered`)
    }

    const namespaceCatalog = await this._getNamespaceCatalog(namespace)
    const { _token: token } = await this._getNamespaceCatalog(namespace)

    // 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
    if (token === undefined) {
      throw new Error(`${namespace} namespace token is undefined`)
    }

    const downloadToken = await this._updater.call('getResourceDownloadToken', {
      token: namespaceCatalog._token,
      token,
      id,
      version,
    })

@@ -30,7 +30,7 @@
    "node": ">=4"
  },
  "dependencies": {
    "@xen-orchestra/cron": "^1.0.0",
    "@xen-orchestra/cron": "^1.0.3",
    "babel-runtime": "^6.11.6",
    "lodash": "^4.16.2"
  },

@@ -1,47 +1,3 @@
'use strict'

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const pkg = require('./package')

const plugins = {
  lodash: {},
}

const presets = {
  '@babel/preset-env': {
    debug: !__TEST__,
    loose: true,
    shippedProposals: true,
    targets: __PROD__
      ? (() => {
          let node = (pkg.engines || {}).node
          if (node !== undefined) {
            const trimChars = '^=>~'
            while (trimChars.includes(node[0])) {
              node = node.slice(1)
            }
            return { node: node }
          }
        })()
      : { browsers: '', node: 'current' },
    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
  },
}

Object.keys(pkg.devDependencies || {}).forEach(name => {
  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
    plugins[name] = {}
  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
    presets[name] = {}
  }
})

module.exports = {
  comments: !__PROD__,
  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
}
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)

@@ -1,6 +1,6 @@
{
  "name": "xo-server-perf-alert",
  "version": "0.0.0",
  "version": "0.1.0",
  "license": "AGPL-3.0",
  "description": "",
  "keywords": [],
@@ -20,16 +20,16 @@
    "node": ">=6"
  },
  "dependencies": {
    "@xen-orchestra/cron": "^1.0.0",
    "@xen-orchestra/cron": "^1.0.3",
    "d3-time-format": "^2.1.1",
    "json5": "^0.5.1",
    "json5": "^1.0.0",
    "lodash": "^4.17.4"
  },
  "devDependencies": {
    "@babel/cli": "7.0.0-beta.40",
    "@babel/core": "7.0.0-beta.40",
    "@babel/preset-env": "7.0.0-beta.40",
    "@babel/preset-flow": "^7.0.0-beta.40",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "^7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^5.1.3",
    "rimraf": "^2.6.2"

@@ -1,10 +1,11 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
import { forOwn, map, mean } from 'lodash'
import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'

const VM_FUNCTIONS = {
  cpuUsage: {
    name: 'VM CPU usage',
    description:
      'Raises an alarm when the average usage of any CPU is higher than the threshold',
    unit: '%',
@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
    },
  },
  memoryUsage: {
    name: 'VM memory usage',
    description:
      'Raises an alarm when the used memory % is higher than the threshold',
    unit: '% used',
@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {

const HOST_FUNCTIONS = {
  cpuUsage: {
    name: 'host CPU usage',
    description:
      'Raises an alarm when the average usage of any CPU is higher than the threshold',
    unit: '%',
@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
    },
  },
  memoryUsage: {
    name: 'host memory usage',
    description:
      'Raises an alarm when the used memory % is higher than the threshold',
    unit: '% used',
@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
        )
      },
      getDisplayableValue,
      shouldAlarm: () => {
        return getDisplayableValue() > threshold
      },
      shouldAlarm: () => getDisplayableValue() > threshold,
    }
  },
},
}

const SR_FUNCTIONS = {
  storageUsage: {
    name: 'SR storage usage',
    description:
      'Raises an alarm when the used disk space % is higher than the threshold',
    unit: '% used',
    comparator: '>',
    createGetter: threshold => sr => {
      const getDisplayableValue = () =>
        sr.physical_utilisation * 100 / sr.physical_size
      return {
        getDisplayableValue,
        shouldAlarm: () => getDisplayableValue() > threshold,
      }
    },
  },
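Unlike the RRD-based VM and host alarms, the SR alarm reads `physical_utilisation` and `physical_size` straight off the XAPI record: `createGetter(threshold)` closes over the threshold and returns a per-object probe. Used in isolation (sketch; the record values are made up):

```js
// Sketch: evaluating the SR storage alarm against one record.
const getter = SR_FUNCTIONS.storageUsage.createGetter(80)
const probe = getter({ physical_utilisation: 900, physical_size: 1000 })
probe.getDisplayableValue() // 90
probe.shouldAlarm() // true — 90 % used > 80 % threshold
```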
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
const TYPE_FUNCTION_MAP = {
  vm: VM_FUNCTIONS,
  host: HOST_FUNCTIONS,
  sr: SR_FUNCTIONS,
}

// list of currently ringing alarms, to avoid double notification

@@ -229,11 +250,52 @@ export const configurationSchema = {
      required: ['uuids'],
    },
  },
  srMonitors: {
    type: 'array',
    title: 'SR Monitors',
    description:
      'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
      'The Average is compared to the threshold and an alarm is raised upon crossing',
    items: {
      type: 'object',
      properties: {
        uuids: {
          title: 'SRs',
          type: 'array',
          items: {
            type: 'string',
            $type: 'SR',
          },
        },
        variableName: {
          title: 'Alarm Type',
          description: Object.keys(SR_FUNCTIONS)
            .map(
              k =>
                ` * ${k} (${SR_FUNCTIONS[k].unit}): ${
                  SR_FUNCTIONS[k].description
                }`
            )
            .join('\n'),
          type: 'string',
          default: Object.keys(SR_FUNCTIONS)[0],
          enum: Object.keys(SR_FUNCTIONS),
        },
        alarmTriggerLevel: {
          title: 'Threshold',
          description:
            'The direction of the crossing is given by the Alarm type',
          type: 'number',
          default: 80,
        },
      },
      required: ['uuids'],
    },
  },
  toEmails: {
    type: 'array',
    title: 'Email addresses',
    description: 'Email addresses of the alert recipients',

    items: {
      type: 'string',
    },

@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
      currentAlarms[alarmId] = true
      raiseCallback(alarmId)
    }
  } else {
    if (current) {
      try {
        lowerCallback(alarmId)
      } finally {
        delete currentAlarms[alarmId]
      }
  } else if (current) {
    try {
      lowerCallback(alarmId)
    } finally {
      delete currentAlarms[alarmId]
    }
  }
}
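Only the lowering branch of `raiseOrLowerAlarm` is visible in this hunk. Reading it together with the call sites below, which pass `(alarmId, shouldRaise, raiseCallback, lowerCallback)`, the whole function plausibly reads as follows (a reconstruction under that assumption, not the verbatim source):

```js
// Hypothetical reconstruction from the visible branch and the call sites.
// `currentAlarms` is the ringing-alarm registry declared above.
const raiseOrLowerAlarm = (alarmId, shouldRaise, raiseCallback, lowerCallback) => {
  const current = currentAlarms[alarmId]
  if (shouldRaise) {
    if (!current) {
      currentAlarms[alarmId] = true
      raiseCallback(alarmId)
    }
  } else if (current) {
    try {
      lowerCallback(alarmId)
    } finally {
      delete currentAlarms[alarmId]
    }
  }
}
```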
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
    clearCurrentAlarms()
  }

  load () {
    this._job.start()
  }

  unload () {
    this._job.stop()
  }

  _generateUrl (type, object) {
    const map = {
      vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
      host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
    const { baseUrl } = this._configuration
    const { uuid } = object
    switch (type) {
      case 'vm':
        return `${baseUrl}#/vms/${uuid}/stats`
      case 'host':
        return `${baseUrl}#/hosts/${uuid}/stats`
      case 'sr':
        return `${baseUrl}#/srs/${uuid}/general`
      default:
        return 'unknown type'
    }
    return map[type]()
  }

  async test () {
    const hostMonitorPart2 = await Promise.all(
      map(this._getMonitors(), async m => {
        const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
        return `
    const monitorBodies = await Promise.all(
      map(
        this._getMonitors(),
        async m => `
## Monitor for ${m.title}

${m.tableHeader}
${tableBody.join('')}`
      })
${(await m.snapshot()).map(entry => entry.listItem).join('')}`
      )
    )

    this._sendAlertEmail(

@@ -322,18 +396,10 @@ ${tableBody.join('')}`
      `
# Performance Alert Test
Your alarms and their current status:
${hostMonitorPart2.join('\n')}`
${monitorBodies.join('\n')}`
    )
  }

  load () {
    this._job.start()
  }

  unload () {
    this._job.stop()
  }

  _parseDefinition (definition) {
    const alarmId = `${definition.objectType}|${definition.variableName}|${
      definition.alarmTriggerLevel

@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
      definition.alarmTriggerPeriod !== undefined
        ? definition.alarmTriggerPeriod
        : 60
    const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
    return {
      ...definition,
      alarmId,
      vmFunction: typeFunction,
      title: `${typeText} ${definition.variableName} ${
        typeFunction.comparator
      } ${definition.alarmTriggerLevel}${typeFunction.unit}`,
      tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
      title: `${typeFunction.name} ${typeFunction.comparator} ${
        definition.alarmTriggerLevel
      }${typeFunction.unit}`,
      snapshot: async () => {
        return Promise.all(
          map(definition.uuids, async uuid => {
            try {
              const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
              const objectLink = `[${
                monitoredObject.name_label
              }](${this._generateUrl(definition.objectType, monitoredObject)})`
              const rrd = await this.getRrd(monitoredObject, observationPeriod)
              const couldFindRRD = rrd !== null
              const result = {
                object: monitoredObject,
                couldFindRRD,
                objectLink: objectLink,
                listItem: ` * ${typeText} ${objectLink} ${
                  definition.variableName
                }: **Can't read performance counters**\n`,
                tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
                uuid,
                name: definition.name,
                object: this._xo.getXapi(uuid).getObject(uuid),
              }
              if (!couldFindRRD) {
                return result

              if (result.object === undefined) {
                throw new Error('object not found')
              }
              const data = parseData(rrd, monitoredObject.uuid)
              const textValue =
                data.getDisplayableValue().toFixed(1) + typeFunction.unit
              const shouldAlarm = data.shouldAlarm()
              return {
                ...result,
                value: data.getDisplayableValue(),
                shouldAlarm: shouldAlarm,
                textValue: textValue,
                listItem: ` * ${typeText} ${objectLink} ${
                  definition.variableName
                }: ${textValue}\n`,
                tableItem: `${objectLink} | ${textValue} | ${
                  shouldAlarm ? '**Alert Ongoing**' : 'no alert'
                }\n`,

              result.objectLink = `[${
                result.object.name_label
              }](${this._generateUrl(definition.objectType, result.object)})`

              if (typeFunction.createGetter === undefined) {
                // Stats via RRD
                result.rrd = await this.getRrd(result.object, observationPeriod)
                if (result.rrd !== null) {
                  const data = parseData(result.rrd, result.object.uuid)
                  assign(result, {
                    data,
                    value: data.getDisplayableValue(),
                    shouldAlarm: data.shouldAlarm(),
                  })
                }
              } else {
                // Stats via XAPI
                const getter = typeFunction.createGetter(
                  definition.alarmTriggerLevel
                )
                const data = getter(result.object)
                assign(result, {
                  value: data.getDisplayableValue(),
                  shouldAlarm: data.shouldAlarm(),
                })
              }

              result.listItem = ` * ${result.objectLink}: ${
                result.value === undefined
                  ? "**Can't read performance counters**"
                  : result.value.toFixed(1) + typeFunction.unit
              }\n`

              return result
            } catch (_) {
              return {
                uuid,
                object: null,
                couldFindRRD: false,
                objectLink: `cannot find object ${uuid}`,
                listItem: ` * ${typeText} ${uuid} ${
                  definition.variableName
                }: **Can't read performance counters**\n`,
                tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
                listItem: ` * ${uuid}: **Can't read performance counters**\n`,
              }
            }
          })

@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
  _getMonitors () {
    return map(this._configuration.hostMonitors, def =>
      this._parseDefinition({ ...def, objectType: 'host' })
    ).concat(
      map(this._configuration.vmMonitors, def =>
        this._parseDefinition({ ...def, objectType: 'vm' })
      )
    )
      .concat(
        map(this._configuration.vmMonitors, def =>
          this._parseDefinition({ ...def, objectType: 'vm' })
        )
      )
      .concat(
        map(this._configuration.srMonitors, def =>
          this._parseDefinition({ ...def, objectType: 'sr' })
        )
      )
  }

  async _checkMonitors () {

@@ -466,7 +542,7 @@ ${hostMonitorPart2.join('\n')}`
    for (const entry of snapshot) {
      raiseOrLowerAlarm(
        `${monitor.alarmId}|${entry.uuid}|RRD`,
        !entry.couldFindRRD,
        entry.value === undefined,
        () => {
          this._sendAlertEmail(
            'Secondary Issue',

@@ -477,9 +553,11 @@ ${entry.listItem}`
        },
        () => {}
      )
      if (!entry.couldFindRRD) {

      if (entry.value === undefined) {
        continue
      }

      const raiseAlarm = alarmId => {
        // sample XenCenter message:
        // value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>

@@ -500,23 +578,24 @@ ${entry.listItem}`
        this._sendAlertEmail(
          '',
          `
## ALERT ${monitor.title}
## ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
        )
      }

      const lowerAlarm = alarmId => {
        console.log('lowering Alarm', alarmId)
        this._sendAlertEmail(
          'END OF ALERT',
          `
## END OF ALERT ${monitor.title}
## END OF ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
        )
      }

      raiseOrLowerAlarm(
        `${monitor.alarmId}|${entry.uuid}`,
        entry.shouldAlarm,

@@ -58,7 +58,8 @@ export const configurationSchema = {
  },
  port: {
    type: 'integer',
    description: 'port of the SMTP server (defaults to 25 or 465 for TLS)',
    description:
      'port of the SMTP server (defaults to 25 or 465 for TLS)',
  },
  secure: {
    default: false,

@@ -1,6 +1,6 @@
{
  "name": "xo-server-usage-report",
  "version": "0.3.2",
  "version": "0.4.2",
  "license": "AGPL-3.0",
  "description": "",
  "keywords": [
@@ -34,7 +34,7 @@
    "node": ">=4"
  },
  "dependencies": {
    "@xen-orchestra/cron": "^1.0.0",
    "@xen-orchestra/cron": "^1.0.3",
    "babel-runtime": "^6.23.0",
    "handlebars": "^4.0.6",
    "html-minifier": "^3.5.8",

@@ -90,7 +90,7 @@
.top table{
  margin: auto;
  margin-top: 20px;
  width: 400px;
  min-width: 30em;
}

.top table caption {
@@ -121,6 +121,10 @@
  border:1px solid #95a5a6;
  text-align: center;
}

.allResources table {
  min-width: 60em
}
</style>
</head>
<body>
@@ -151,86 +155,34 @@
</tr>
<tr>
  <td>Number:</td>
  <td>{{global.vms.number}}</td>
  <td>
    {{#if global.vmsEvolution.number}}
      {{#compare global.vmsEvolution.number ">" 0}}+{{/compare}}
      {{global.vmsEvolution.number}}
    {{else}}
      0
    {{/if}}
  </td>
  <td>{{global.vms.number}} {{normaliseEvolution global.vmsEvolution.number}}</td>
</tr>
<tr>
  <td>CPU:</td>
  <td>{{global.vms.cpu}} %</td> <!-- One condition doesn't work -->
  <td style='color:{{#compare global.vmsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.cpu}}
      {{#compare global.vmsEvolution.cpu ">" 0}}+{{/compare}}
      {{global.vmsEvolution.cpu}}%
    {{else}}
      0
    {{/if}}
  </td>
  <td>{{normaliseValue global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<tr>
<tr>
  <td>RAM:</td>
  <td>{{global.vms.ram}} GiB</td>
  <td style='color:{{#compare global.vmsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.ram}}
      {{#compare global.vmsEvolution.ram ">" 0}}+{{/compare}}
      {{global.vmsEvolution.ram}}%
    {{else}}
      0
    {{/if}}
  </td>
  <td>{{normaliseValue global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<tr>
<tr>
  <td>Disk read:</td>
  <td>{{global.vms.diskRead}} MiB</td>
  <td style='color:{{#compare global.vmsEvolution.diskRead ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.diskRead}}
      {{#compare global.vmsEvolution.diskRead ">" 0}}+{{/compare}}
      {{global.vmsEvolution.diskRead}}%
    {{else}}
      0
    {{/if}}
  <td>{{normaliseValue global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
  </td>
<tr>
<tr>
  <td>Disk write:</td>
  <td>{{global.vms.diskWrite}} MiB</td>
  <td style='color:{{#compare global.vmsEvolution.diskWrite ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.diskWrite}}
      {{#compare global.vmsEvolution.diskWrite ">" 0}}+{{/compare}}
      {{global.vmsEvolution.diskWrite}}%
    {{else}}
      0
    {{/if}}
  <td>{{normaliseValue global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
  </td>
<tr>
<tr>
  <td>Net reception:</td>
  <td>{{global.vms.netReception}} KiB</td>
  <td style='color:{{#compare global.vmsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.netReception}}
      {{#compare global.vmsEvolution.netReception ">" 0}}+{{/compare}}
      {{global.vmsEvolution.netReception}}%
    {{else}}
      0
    {{/if}}
  <td>Network RX:</td>
  <td>{{normaliseValue global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
  </td>
<tr>
<tr>
  <td>Net transmission:</td>
  <td>{{global.vms.netTransmission}} KiB</td>
  <td style='color:{{#compare global.vmsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
    {{#if global.vmsEvolution.netTransmission}}
      {{#compare global.vmsEvolution.netTransmission ">" 0}}+{{/compare}}
      {{global.vmsEvolution.netTransmission}}%
    {{else}}
      0
    {{/if}}
  <td>Network TX:</td>
  <td>{{normaliseValue global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
  </td>
<tr>
</table>
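The repeated `{{#if}}`/`{{#compare}}` blocks collapse into two helpers, `normaliseValue` and `normaliseEvolution`, whose definitions live in the plugin's JS outside this excerpt. A plausible registration (a reconstruction, not the plugin's actual code):

```js
// Hypothetical reconstructions of the helpers used in the template above:
// normaliseValue rounds a number for display; normaliseEvolution renders a
// signed delta, replacing the inline {{#compare}} blocks that were removed.
import Handlebars from 'handlebars'

Handlebars.registerHelper('normaliseValue', value =>
  value == null ? '-' : Math.round(value * 100) / 100
)

Handlebars.registerHelper('normaliseEvolution', value =>
  new Handlebars.SafeString(
    value > 0 ? `(+${value})` : value < 0 ? `(${value})` : '(=)'
  )
)
```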
|
||||
@@ -253,7 +205,7 @@
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} %</td>
|
||||
<td>{{normaliseValue this.value}} %</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
|
||||
@@ -264,7 +216,7 @@
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} GiB</td>
|
||||
<td>{{normaliseValue this.value}} GiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
@@ -274,7 +226,7 @@
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} MiB</td>
|
||||
<td>{{normaliseValue this.value}} MiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
@@ -284,27 +236,27 @@
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} MiB</td>
|
||||
<td>{{normaliseValue this.value}} MiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
<td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
|
||||
<td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
|
||||
</tr>
|
||||
{{#each topVms.netReception}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} KiB</td>
|
||||
<td>{{normaliseValue this.value}} KiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
<tr>
|
||||
<td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
|
||||
<td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
|
||||
</tr>
|
||||
{{#each topVms.netTransmission}}
|
||||
<tr>
|
||||
<td>{{shortUUID this.uuid}}</td>
|
||||
<td>{{this.name}}</td>
|
||||
<td>{{this.value}} KiB</td>
|
||||
<td>{{normaliseValue this.value}} KiB</td>
|
||||
</tr>
|
||||
{{/each}}
|
||||
</table>
|
||||
@@ -318,75 +270,33 @@
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Number:</td>
|
||||
<td>{{global.hosts.number}}</td>
|
||||
<td>
|
||||
{{#if global.hostsEvolution.number}}
|
||||
{{#compare global.hostsEvolution.number ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.number}}
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>{{global.hosts.number}} {{normaliseEvolution global.hostsEvolution.number}}
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>CPU:</td>
|
||||
<td>{{global.hosts.cpu}} %</td>
|
||||
<td style='color:{{#compare global.hostsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
|
||||
{{#if global.hostsEvolution.cpu}}
|
||||
{{#compare global.hostsEvolution.cpu ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.cpu}}%
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>{{normaliseValue global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
|
||||
</td>
|
||||
<tr>
|
||||
<tr>
|
||||
<td>RAM:</td>
|
||||
<td>{{global.hosts.ram}} GiB</td>
|
||||
<td style='color:{{#compare global.hostsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
|
||||
{{#if global.hostsEvolution.ram}}
|
||||
{{#compare global.hostsEvolution.ram ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.ram}}%
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>{{normaliseValue global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
|
||||
</td>
|
||||
</td>
|
||||
<tr>
|
||||
<tr>
|
||||
<td>Load average:</td>
|
||||
<td>{{global.hosts.load}} </td>
|
||||
<td style='color:{{#compare global.hostsEvolution.load ">" 0}} red {{else}} green {{/compare}}'>
|
||||
{{#if global.hostsEvolution.load}}
|
||||
{{#compare global.hostsEvolution.load ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.load}}%
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>{{normaliseValue global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
|
||||
</td>
|
||||
<tr>
|
||||
<tr>
|
||||
<td>Net reception:</td>
|
||||
<td>{{global.hosts.netReception}} KiB</td>
|
||||
<td style='color:{{#compare global.hostsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
|
||||
{{#if global.hostsEvolution.netReception}}
|
||||
{{#compare global.hostsEvolution.netReception ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.netReception}}%
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>Network RX:</td>
|
||||
<td>{{normaliseValue global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
|
||||
</td>
|
||||
<tr>
|
||||
<tr>
|
||||
<td>Net transmission:</td>
|
||||
<td>{{global.hosts.netTransmission}} KiB</td>
|
||||
<td style='color:{{#compare global.hostsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
|
||||
{{#if global.hostsEvolution.netTransmission}}
|
||||
{{#compare global.hostsEvolution.netTransmission ">" 0}}+{{/compare}}
|
||||
{{global.hostsEvolution.netTransmission}}%
|
||||
{{else}}
|
||||
0
|
||||
{{/if}}
|
||||
<td>Network TX:</td>
|
||||
<td>{{normaliseValue global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
|
||||
</td>
|
||||
<tr>
|
||||
</table>
|
||||
@@ -408,7 +318,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} %</td>
<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
<tr>
@@ -418,7 +328,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -428,27 +338,27 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} </td>
<td>{{normaliseValue this.value}} </td>
</tr>
{{/each}}
<tr>
<td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
<td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
</tr>
{{#each topHosts.netReception}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
<td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
<td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
</tr>
{{#each topHosts.netTransmission}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.value}} KiB</td>
<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -464,11 +374,11 @@
<th>Name</th>
<th>value</th>
</tr>
{{#each topAllocation}}
{{#each topSrs}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{this.size}} GiB</td>
<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
</table>
@@ -533,8 +443,8 @@
<th>UUID</th>
<th>Name</th>
</tr>
{{#if vmsRessourcesEvolution.added}}
{{#each vmsRessourcesEvolution.added}}
{{#if vmsResourcesEvolution.added}}
{{#each vmsResourcesEvolution.added}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
@@ -553,8 +463,8 @@
<th>UUID</th>
<th>Name</th>
</tr>
{{#if vmsRessourcesEvolution.removed}}
{{#each vmsRessourcesEvolution.removed}}
{{#if vmsResourcesEvolution.removed}}
{{#each vmsResourcesEvolution.removed}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
@@ -572,8 +482,8 @@
<th>UUID</th>
<th>Name</th>
</tr>
{{#if hostsRessourcesEvolution.added}}
{{#each hostsRessourcesEvolution.added}}
{{#if hostsResourcesEvolution.added}}
{{#each hostsResourcesEvolution.added}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
@@ -591,8 +501,8 @@
<th>UUID</th>
<th>Name</th>
</tr>
{{#if hostsRessourcesEvolution.removed}}
{{#each hostsRessourcesEvolution.removed}}
{{#if hostsResourcesEvolution.removed}}
{{#each hostsResourcesEvolution.removed}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
@@ -606,5 +516,81 @@
</table>
</div>
</div>
{{#if allResources}}
<div class="page">
<div class="top allResources">
<hr color="#95a5a6" size="1px"/>
<h3 style="text-align: center;">All resources</h3>
<hr color="#95a5a6" size="1px"/>
<table>
<caption>VMs</caption>
<tr>
<th>UUID</th>
<th>Name</th>
<th>CPU</th>
<th>RAM (GiB)</th>
<th>Disk read (MiB)</th>
<th>Disk write (MiB)</th>
<th>Network RX (KiB)</th>
<th>Network TX (KiB)</th>
</tr>
{{#each allResources.vms}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
<td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
<td>{{normaliseValue this.diskRead}} {{normaliseEvolution this.evolution.diskRead}}</td>
<td>{{normaliseValue this.diskWrite}} {{normaliseEvolution this.evolution.diskWrite}}</td>
<td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
<td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
</tr>
{{/each}}
</table>
<table>
<caption>Hosts</caption>
<tr>
<th>UUID</th>
<th>Name</th>
<th>CPU</th>
<th>RAM (GiB)</th>
<th>Load average</th>
<th>Network RX (KiB)</th>
<th>Network TX (KiB)</th>
</tr>
{{#each allResources.hosts}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
<td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
<td>{{normaliseValue this.load}} {{normaliseEvolution this.evolution.load}}</td>
<td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
<td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
</tr>
{{/each}}
</table>
<table>
<caption>SRs</caption>
<tr>
<th>UUID</th>
<th>Name</th>
<th>Total space (GiB)</th>
<th>Used space (GiB)</th>
<th>Free space (GiB)</th>
</tr>
{{#each allResources.srs}}
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
<td>{{normaliseValue this.total}} {{normaliseEvolution this.evolution.total}}</td>
<td>{{normaliseValue this.used}}</td>
<td>{{normaliseValue this.free}}</td>
</tr>
{{/each}}
</table>
</div>
</div>
{{/if}}
</body>
</html>

@@ -6,7 +6,9 @@ import {
  concat,
  differenceBy,
  filter,
  find,
  forEach,
  get,
  isFinite,
  map,
  orderBy,
@@ -67,6 +69,10 @@ export const configurationSchema = {
      type: 'string',
    },
  },
  all: {
    type: 'boolean',
    description: "It includes all resources' stats if on.",
  },
  periodicity: {
    type: 'string',
    enum: ['monthly', 'weekly'],
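For reference, a plugin configuration object matching the new schema above could look like this (a minimal sketch; only the two keys visible in this hunk are shown, everything else is omitted):

{
  // include the per-VM/host/SR "All resources" tables in the report
  all: true,
  // weekly reports instead of monthly ones
  periodicity: 'weekly',
}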
@@ -88,12 +94,12 @@ Handlebars.registerHelper('compare', function (
  options
) {
  if (arguments.length < 3) {
    throw new Error('Handlerbars Helper "compare" needs 2 parameters')
    throw new Error('Handlebars Helper "compare" needs 2 parameters')
  }

  if (!compareOperators[operator]) {
    throw new Error(
      `Handlerbars Helper "compare" doesn't know the operator ${operator}`
      `Handlebars Helper "compare" doesn't know the operator ${operator}`
    )
  }

@@ -104,12 +110,12 @@ Handlebars.registerHelper('compare', function (

Handlebars.registerHelper('math', function (lvalue, operator, rvalue, options) {
  if (arguments.length < 3) {
    throw new Error('Handlerbars Helper "math" needs 2 parameters')
    throw new Error('Handlebars Helper "math" needs 2 parameters')
  }

  if (!mathOperators[operator]) {
    throw new Error(
      `Handlerbars Helper "math" doesn't know the operator ${operator}`
      `Handlebars Helper "math" doesn't know the operator ${operator}`
    )
  }

@@ -122,6 +128,23 @@ Handlebars.registerHelper('shortUUID', uuid => {
  }
})

Handlebars.registerHelper(
  'normaliseValue',
  value => (isFinite(value) ? round(value, 2) : '-')
)

Handlebars.registerHelper(
  'normaliseEvolution',
  value =>
    new Handlebars.SafeString(
      isFinite(+value) && +value !== 0
        ? (value = round(value, 2)) > 0
          ? `(<b style="color: green;">▲ ${value}</b>)`
          : `(<b style="color: red;">▼ ${String(value).slice(1)}</b>)`
        : ''
    )
)

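A quick sketch of how the two registered helpers behave (the template string and values are hypothetical; normaliseEvolution receives the percentage strings produced by computePercentage further down):

const render = Handlebars.compile('{{normaliseValue v}} {{normaliseEvolution e}}')
render({ v: 3.14159, e: '12.5' })
// → '3.14 (<b style="color: green;">▲ 12.5</b>)'
render({ v: undefined, e: '-4.2' })
// → '- (<b style="color: red;">▼ 4.2</b>)'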
// ===================================================================

function computeMean (values) {
@@ -137,12 +160,12 @@ function computeMean (values) {
  return sum / n
}

const computeDoubleMean = val => computeMean(val.map(computeMean))
const computeDoubleMean = val => computeMean(map(val, computeMean))

function computeMeans (objects, options) {
  return zipObject(
    options,
    map(options, opt => round(computeMean(map(objects, opt)), 2))
    map(options, opt => computeMean(map(objects, opt)), 2)
  )
}

@@ -163,67 +186,103 @@ function getTop (objects, options) {
        obj => ({
          uuid: obj.uuid,
          name: obj.name,
          value: round(obj[opt], 2),
          value: obj[opt],
        })
      )
    )
  )
}

function conputePercentage (curr, prev, options) {
function computePercentage (curr, prev, options) {
  return zipObject(
    options,
    map(
      options,
      opt =>
        prev[opt] === 0
        prev[opt] === 0 || prev[opt] === null
          ? 'NONE'
          : `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
          : `${(curr[opt] - prev[opt]) * 100 / prev[opt]}`
    )
  )
}

function getDiff (oldElements, newElements) {
  return {
    added: differenceBy(oldElements, newElements, 'uuid'),
    removed: differenceBy(newElements, oldElements, 'uuid'),
    added: differenceBy(newElements, oldElements, 'uuid'),
    removed: differenceBy(oldElements, newElements, 'uuid'),
  }
}

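These two helpers drive every evolution figure in the report: computePercentage returns, per metric, the relative change (curr − prev) × 100 / prev as a string, or 'NONE' when there is no usable baseline, and getDiff classifies objects by uuid. A small illustration with hypothetical inputs:

computePercentage({ cpu: 30 }, { cpu: 20 }, ['cpu'])
// → { cpu: '50' } — CPU usage grew by 50%
getDiff(
  [{ uuid: 'a' }, { uuid: 'b' }], // old
  [{ uuid: 'b' }, { uuid: 'c' }] // new
)
// → { added: [{ uuid: 'c' }], removed: [{ uuid: 'a' }] }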
function getMemoryUsedMetric ({ memory, memoryFree = memory }) {
  return map(memory, (value, key) => value - memoryFree[key])
}

// ===================================================================

function getVmsStats ({ runningVms, xo }) {
  return Promise.all(
    map(runningVms, async vm => {
      const vmStats = await xo.getXapiVmStats(vm, 'days')
      return {
        uuid: vm.uuid,
        name: vm.name_label,
        cpu: computeDoubleMean(vmStats.stats.cpus),
        ram: computeMean(vmStats.stats.memoryUsed) / gibPower,
        diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
        diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
        netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
        netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
      }
    })
async function getVmsStats ({ runningVms, xo }) {
  return orderBy(
    await Promise.all(
      map(runningVms, async vm => {
        const vmStats = await xo.getXapiVmStats(vm, 'days')
        return {
          uuid: vm.uuid,
          name: vm.name_label,
          cpu: computeDoubleMean(vmStats.stats.cpus),
          ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
          diskRead:
            computeDoubleMean(values(get(vmStats.stats.xvds, 'r'))) / mibPower,
          diskWrite:
            computeDoubleMean(values(get(vmStats.stats.xvds, 'w'))) / mibPower,
          netReception:
            computeDoubleMean(get(vmStats.stats.vifs, 'rx')) / kibPower,
          netTransmission:
            computeDoubleMean(get(vmStats.stats.vifs, 'tx')) / kibPower,
        }
      })
    ),
    'name',
    'asc'
  )
}

function getHostsStats ({ runningHosts, xo }) {
  return Promise.all(
    map(runningHosts, async host => {
      const hostStats = await xo.getXapiHostStats(host, 'days')
async function getHostsStats ({ runningHosts, xo }) {
  return orderBy(
    await Promise.all(
      map(runningHosts, async host => {
        const hostStats = await xo.getXapiHostStats(host, 'days')
        return {
          uuid: host.uuid,
          name: host.name_label,
          cpu: computeDoubleMean(hostStats.stats.cpus),
          ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
          load: computeMean(hostStats.stats.load),
          netReception:
            computeDoubleMean(get(hostStats.stats.pifs, 'rx')) / kibPower,
          netTransmission:
            computeDoubleMean(get(hostStats.stats.pifs, 'tx')) / kibPower,
        }
      })
    ),
    'name',
    'asc'
  )
}

function getSrsStats (xoObjects) {
  return orderBy(
    map(filter(xoObjects, { type: 'SR' }), sr => {
      const total = sr.size / gibPower
      const used = sr.physical_usage / gibPower
      return {
        uuid: host.uuid,
        name: host.name_label,
        cpu: computeDoubleMean(hostStats.stats.cpus),
        ram: computeMean(hostStats.stats.memoryUsed) / gibPower,
        load: computeMean(hostStats.stats.load),
        netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
        netTransmission: computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
        uuid: sr.uuid,
        name: sr.name_label,
        total,
        used,
        free: total - used,
      }
    })
    }),
    'total',
    'desc'
  )
}

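To make the aggregation in these stats getters concrete: every XAPI stats field is a series of samples, cpus/xvds/vifs/pifs hold one series per device, and getMemoryUsedMetric derives the used-memory series before it is averaged. A hypothetical walk-through:

// two CPUs sampled three times each
computeDoubleMean([[10, 20, 30], [30, 40, 50]]) // → mean(20, 40) = 30
// memory holds total bytes, memoryFree the matching free samples
getMemoryUsedMetric({ memory: [100, 100], memoryFree: [60, 40] }) // → [40, 60]
computeMean([40, 60]) // → 50, then divided by gibPower to get GiB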
@@ -303,20 +362,21 @@ function getTopHosts ({ hostsStats, xo }) {
  ])
}

function getMostAllocatedSpaces ({ disks, xo }) {
  return map(orderBy(disks, ['size'], ['desc']).slice(0, 3), disk => ({
    uuid: disk.uuid,
    name: disk.name_label,
    size: round(disk.size / gibPower, 2),
  }))
function getTopSrs ({ srsStats, xo }) {
  return getTop(srsStats, ['total']).total
}

async function getHostsMissingPatches ({ runningHosts, xo }) {
  const hostsMissingPatches = await Promise.all(
    map(runningHosts, async host => {
      const hostsPatches = await xo
      let hostsPatches = await xo
        .getXapi(host)
        .listMissingPoolPatchesOnHost(host._xapiId)

      if (host.license_params.sku_type === 'free') {
        hostsPatches = filter(hostsPatches, { paid: false })
      }

      if (hostsPatches.length > 0) {
        return {
          uuid: host.uuid,
@@ -347,46 +407,75 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {

    const prevDate = oldStats.style.currDate

    const vmsEvolution = {
      number: newStatsVms.number - oldStatsVms.number,
      ...conputePercentage(newStatsVms, oldStatsVms, [
    const resourcesOptions = {
      vms: [
        'cpu',
        'ram',
        'diskRead',
        'diskWrite',
        'netReception',
        'netTransmission',
      ]),
      ],
      hosts: ['cpu', 'ram', 'load', 'netReception', 'netTransmission'],
      srs: ['total'],
    }

    const vmsEvolution = {
      number: newStatsVms.number - oldStatsVms.number,
      ...computePercentage(newStatsVms, oldStatsVms, resourcesOptions.vms),
    }

    const hostsEvolution = {
      number: newStatsHosts.number - oldStatsHosts.number,
      ...conputePercentage(newStatsHosts, oldStatsHosts, [
        'cpu',
        'ram',
        'load',
        'netReception',
        'netTransmission',
      ]),
      ...computePercentage(
        newStatsHosts,
        oldStatsHosts,
        resourcesOptions.hosts
      ),
    }

    const vmsRessourcesEvolution = getDiff(
    const vmsResourcesEvolution = getDiff(
      oldStatsVms.allVms,
      newStatsVms.allVms
    )
    const hostsRessourcesEvolution = getDiff(
    const hostsResourcesEvolution = getDiff(
      oldStatsHosts.allHosts,
      newStatsHosts.allHosts
    )

    const usersEvolution = getDiff(oldStats.users, newStats.users)

    const newAllResourcesStats = newStats.allResources
    const oldAllResourcesStats = oldStats.allResources

    // adding for each resource its evolution
    if (
      newAllResourcesStats !== undefined &&
      oldAllResourcesStats !== undefined
    ) {
      forEach(newAllResourcesStats, (resource, key) => {
        const option = resourcesOptions[key]

        if (option !== undefined) {
          forEach(resource, newItem => {
            const oldItem = find(oldAllResourcesStats[key], {
              uuid: newItem.uuid,
            })

            if (oldItem !== undefined) {
              newItem.evolution = computePercentage(newItem, oldItem, option)
            }
          })
        }
      })
    }

    return {
      vmsEvolution,
      hostsEvolution,
      prevDate,
      vmsRessourcesEvolution,
      hostsRessourcesEvolution,
      vmsResourcesEvolution,
      hostsResourcesEvolution,
      usersEvolution,
    }
  } catch (err) {
@@ -394,7 +483,7 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {
  }
}

async function dataBuilder ({ xo, storedStatsPath }) {
async function dataBuilder ({ xo, storedStatsPath, all }) {
  const xoObjects = values(xo.getObjects())
  const runningVms = filter(xoObjects, { type: 'VM', power_state: 'Running' })
  const haltedVms = filter(xoObjects, { type: 'VM', power_state: 'Halted' })
@@ -403,18 +492,17 @@ async function dataBuilder ({ xo, storedStatsPath }) {
    power_state: 'Running',
  })
  const haltedHosts = filter(xoObjects, { type: 'host', power_state: 'Halted' })
  const disks = filter(xoObjects, { type: 'SR' })
  const [
    users,
    vmsStats,
    hostsStats,
    topAllocation,
    srsStats,
    hostsMissingPatches,
  ] = await Promise.all([
    xo.getAllUsers(),
    getVmsStats({ xo, runningVms }),
    getHostsStats({ xo, runningHosts }),
    getMostAllocatedSpaces({ xo, disks }),
    getSrsStats(xoObjects),
    getHostsMissingPatches({ xo, runningHosts }),
  ])

@@ -423,35 +511,50 @@ async function dataBuilder ({ xo, storedStatsPath }) {
    globalHostsStats,
    topVms,
    topHosts,
    topSrs,
    usersEmail,
  ] = await Promise.all([
    computeGlobalVmsStats({ xo, vmsStats, haltedVms }),
    computeGlobalHostsStats({ xo, hostsStats, haltedHosts }),
    getTopVms({ xo, vmsStats }),
    getTopHosts({ xo, hostsStats }),
    getTopSrs({ xo, srsStats }),
    getAllUsersEmail(users),
  ])

  let allResources
  if (all) {
    allResources = {
      vms: vmsStats,
      hosts: hostsStats,
      srs: srsStats,
      date: currDate,
    }
  }

  const evolution = await computeEvolution({
    allResources,
    storedStatsPath,
    hosts: globalHostsStats,
    usersEmail,
    vms: globalVmsStats,
  })

  const data = {
  return {
    allResources,
    global: {
      vms: globalVmsStats,
      hosts: globalHostsStats,
      vmsEvolution: evolution && evolution.vmsEvolution,
      hostsEvolution: evolution && evolution.hostsEvolution,
    },
    topVms,
    topHosts,
    topSrs,
    topVms,
    hostsMissingPatches,
    usersEmail,
    topAllocation,
    vmsRessourcesEvolution: evolution && evolution.vmsRessourcesEvolution,
    hostsRessourcesEvolution: evolution && evolution.hostsRessourcesEvolution,
    vmsResourcesEvolution: evolution && evolution.vmsResourcesEvolution,
    hostsResourcesEvolution: evolution && evolution.hostsResourcesEvolution,
    usersEvolution: evolution && evolution.usersEvolution,
    style: {
      imgXo,
@@ -460,8 +563,6 @@ async function dataBuilder ({ xo, storedStatsPath }) {
      page: '{{page}}',
    },
  }

  return data
}

// ===================================================================

@@ -472,6 +573,10 @@ class UsageReportPlugin {
    this._dir = getDataDir
    // Defined in configure().
    this._conf = null
    this._xo.addApiMethod(
      'plugin.usageReport.send',
      this._sendReport.bind(this, false)
    )
  }

  configure (configuration, state) {
@@ -485,7 +590,7 @@ class UsageReportPlugin {
      configuration.periodicity === 'monthly' ? '00 06 1 * *' : '00 06 * * 0'
    ).createJob(async () => {
      try {
        await this._sendReport()
        await this._sendReport(true)
      } catch (error) {
        console.error(
          '[WARN] scheduled function:',
@@ -511,13 +616,14 @@ class UsageReportPlugin {
  }

  test () {
    return this._sendReport()
    return this._sendReport(true)
  }

  async _sendReport () {
  async _sendReport (storeData) {
    const data = await dataBuilder({
      xo: this._xo,
      storedStatsPath: this._storedStatsPath,
      all: this._conf.all,
    })

    await Promise.all([
@@ -537,10 +643,11 @@ class UsageReportPlugin {
        },
      ],
      }),
      storeStats({
        data,
        storedStatsPath: this._storedStatsPath,
      }),
      storeData &&
        storeStats({
          data,
          storedStatsPath: this._storedStatsPath,
        }),
    ])
  }
}

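The two cron expressions used by configure() above decode as follows, assuming standard five-field cron semantics:

// '00 06 1 * *' → 06:00 on the 1st day of every month (periodicity: 'monthly')
// '00 06 * * 0' → 06:00 every Sunday (periodicity: 'weekly')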
packages/xo-server/.babelrc.js (new file, +3)
@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
  require('./package.json')
)
@@ -8,12 +8,14 @@ try {
  const filtered = frames.filter(function (frame) {
    const name = frame && frame.getFileName()

    return (// has a filename
    return (
      // has a filename
      name &&
      // contains a separator (no internal modules)
      name.indexOf(sep) !== -1 &&
      // does not start with `internal`
      name.lastIndexOf('internal', 0) !== -1)
      name.lastIndexOf('internal', 0) !== -1
    )
  })

  // depd (used amongst other by express requires at least 3 frames

packages/xo-server/bin/run-vhd-test (new executable file, +13)
@@ -0,0 +1,13 @@
#!/usr/bin/env node

'use strict'

global.Promise = require('bluebird')


process.on('unhandledRejection', function (reason) {
  console.warn('[Warn] Possibly unhandled rejection:', reason && reason.stack || reason)
})


require("exec-promise")(require("../dist/vhd-test").default)
@@ -1,6 +1,6 @@
{
  "name": "xo-server",
  "version": "5.16.0",
  "version": "5.19.5",
  "license": "AGPL-3.0",
  "description": "Server part of Xen-Orchestra",
  "keywords": [
@@ -15,7 +15,6 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": "Julien Fontanet <julien.fontanet@vates.fr>",
  "preferGlobal": true,
  "files": [
    "better-stacks.js",
@@ -29,16 +28,17 @@
    "bin": "bin"
  },
  "engines": {
    "node": ">=4.5"
    "node": ">=6"
  },
  "dependencies": {
    "@babel/polyfill": "7.0.0-beta.44",
    "@marsaud/smb2-promise": "^0.2.1",
    "@nraynaud/struct-fu": "^1.0.1",
    "@xen-orchestra/cron": "^1.0.0",
    "@xen-orchestra/cron": "^1.0.3",
    "@xen-orchestra/fs": "^0.0.0",
    "ajv": "^6.1.1",
    "app-conf": "^0.5.0",
    "archiver": "^2.1.0",
    "babel-runtime": "^6.26.0",
    "async-iterator-to-stream": "^1.0.1",
    "base64url": "^2.0.0",
    "bind-property-descriptor": "^1.0.0",
    "blocked": "^1.2.1",
@@ -54,31 +54,33 @@
    "escape-string-regexp": "^1.0.5",
    "event-to-promise": "^0.8.0",
    "exec-promise": "^0.7.0",
    "execa": "^0.9.0",
    "execa": "^0.10.0",
    "express": "^4.16.2",
    "express-session": "^1.15.6",
    "fatfs": "^0.10.4",
    "from2": "^2.3.0",
    "fs-extra": "^5.0.0",
    "get-stream": "^3.0.0",
    "golike-defer": "^0.4.1",
    "hashy": "^0.6.2",
    "helmet": "^3.9.0",
    "highland": "^2.11.1",
    "http-proxy": "^1.16.2",
    "http-request-plus": "^0.5.0",
    "http-server-plus": "^0.8.0",
    "http-server-plus": "^0.10.0",
    "human-format": "^0.10.0",
    "is-redirect": "^1.0.0",
    "jest-worker": "^22.4.3",
    "js-yaml": "^3.10.0",
    "json-rpc-peer": "^0.15.3",
    "json5": "^0.5.1",
    "json5": "^1.0.0",
    "julien-f-source-map-support": "0.1.0",
    "julien-f-unzip": "^0.2.1",
    "kindof": "^2.0.0",
    "level": "^3.0.0",
    "level-party": "^3.0.4",
    "level-sublevel": "^6.6.1",
    "limit-concurrency-decorator": "^0.3.0",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.4",
    "make-error": "^1",
    "micromatch": "^3.1.4",
@@ -102,28 +104,37 @@
    "serve-static": "^1.13.1",
    "split-lines": "^1.1.0",
    "stack-chain": "^2.0.0",
    "stoppable": "^1.0.5",
    "struct-fu": "^1.2.0",
    "tar-stream": "^1.5.5",
    "through2": "^2.0.3",
    "tmp": "^0.0.33",
    "uuid": "^3.0.1",
    "value-matcher": "^0.1.0",
    "ws": "^4.0.0",
    "xen-api": "^0.16.5",
    "value-matcher": "^0.2.0",
    "vhd-lib": "^0.0.0",
    "ws": "^5.0.0",
    "xen-api": "^0.16.9",
    "xml2js": "^0.4.19",
    "xo-acl-resolver": "^0.2.3",
    "xo-collection": "^0.4.1",
    "xo-common": "^0.1.1",
    "xo-remote-parser": "^0.3",
    "xo-vmdk-to-vhd": "0.0.12"
    "xo-vmdk-to-vhd": "0.1.0",
    "yazl": "^2.4.3"
  },
  "devDependencies": {
    "babel-cli": "^6.26.0",
    "babel-core": "^6.26.0",
    "@babel/cli": "7.0.0-beta.44",
    "@babel/core": "7.0.0-beta.44",
    "@babel/plugin-proposal-decorators": "7.0.0-beta.44",
    "@babel/plugin-proposal-export-default-from": "7.0.0-beta.44",
    "@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.44",
    "@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
    "@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.44",
    "@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.44",
    "@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.44",
    "@babel/preset-env": "7.0.0-beta.44",
    "@babel/preset-flow": "7.0.0-beta.44",
    "babel-plugin-lodash": "^3.3.2",
    "babel-plugin-transform-decorators-legacy": "^1.3.4",
    "babel-plugin-transform-runtime": "^6.23.0",
    "babel-preset-env": "^1.6.1",
    "babel-preset-stage-0": "^6.24.1",
    "cross-env": "^5.1.3",
    "index-modules": "^0.3.0",
    "rimraf": "^2.6.2"
@@ -136,23 +147,5 @@
    "predev": "yarn run prebuild",
    "prepublishOnly": "yarn run build",
    "start": "node bin/xo-server"
  },
  "babel": {
    "plugins": [
      "lodash",
      "transform-decorators-legacy",
      "transform-runtime"
    ],
    "presets": [
      [
        "env",
        {
          "targets": {
            "node": 4
          }
        }
      ],
      "stage-0"
    ]
  }
}

packages/xo-server/src/api/backup-ng.js (new file, +270)
@@ -0,0 +1,270 @@
import { basename } from 'path'

import { safeDateFormat } from '../utils'

export function createJob ({ schedules, ...job }) {
  job.userId = this.user.id
  return this.createBackupNgJob(job, schedules)
}

createJob.permission = 'admin'
createJob.params = {
  compression: {
    enum: ['', 'native'],
    optional: true,
  },
  mode: {
    enum: ['full', 'delta'],
  },
  name: {
    type: 'string',
    optional: true,
  },
  remotes: {
    type: 'object',
    optional: true,
  },
  schedules: {
    type: 'object',
    optional: true,
  },
  settings: {
    type: 'object',
  },
  srs: {
    type: 'object',
    optional: true,
  },
  vms: {
    type: 'object',
  },
}

export function migrateLegacyJob ({ id }) {
  return this.migrateLegacyBackupJob(id)
}
migrateLegacyJob.permission = 'admin'
migrateLegacyJob.params = {
  id: {
    type: 'string',
  },
}

export function deleteJob ({ id }) {
  return this.deleteBackupNgJob(id)
}
deleteJob.permission = 'admin'
deleteJob.params = {
  id: {
    type: 'string',
  },
}

export function editJob (props) {
  return this.updateJob(props)
}

editJob.permission = 'admin'
editJob.params = {
  compression: {
    enum: ['', 'native'],
    optional: true,
  },
  id: {
    type: 'string',
  },
  mode: {
    enum: ['full', 'delta'],
    optional: true,
  },
  name: {
    type: 'string',
    optional: true,
  },
  remotes: {
    type: 'object',
    optional: true,
  },
  settings: {
    type: 'object',
    optional: true,
  },
  srs: {
    type: 'object',
    optional: true,
  },
  vms: {
    type: 'object',
    optional: true,
  },
}

export function getAllJobs () {
  return this.getAllJobs('backup')
}

getAllJobs.permission = 'admin'

export function getJob ({ id }) {
  return this.getJob(id, 'backup')
}

getJob.permission = 'admin'

getJob.params = {
  id: {
    type: 'string',
  },
}

export async function runJob ({ id, schedule }) {
  return this.runJobSequence([id], await this.getSchedule(schedule))
}

runJob.permission = 'admin'

runJob.params = {
  id: {
    type: 'string',
  },
  schedule: {
    type: 'string',
  },
}

// -----------------------------------------------------------------------------

export function getAllLogs () {
  return this.getBackupNgLogs()
}

getAllLogs.permission = 'admin'

// -----------------------------------------------------------------------------

export function deleteVmBackup ({ id }) {
  return this.deleteVmBackupNg(id)
}

deleteVmBackup.permission = 'admin'

deleteVmBackup.params = {
  id: {
    type: 'string',
  },
}

export function listVmBackups ({ remotes }) {
  return this.listVmBackupsNg(remotes)
}

listVmBackups.permission = 'admin'

listVmBackups.params = {
  remotes: {
    type: 'array',
    items: {
      type: 'string',
    },
  },
}

export function importVmBackup ({ id, sr }) {
  return this.importVmBackupNg(id, sr)
}

importVmBackup.permission = 'admin'

importVmBackup.params = {
  id: {
    type: 'string',
  },
  sr: {
    type: 'string',
  },
}

// -----------------------------------------------------------------------------

export function listPartitions ({ remote, disk }) {
  return this.listBackupNgDiskPartitions(remote, disk)
}

listPartitions.permission = 'admin'

listPartitions.params = {
  disk: {
    type: 'string',
  },
  remote: {
    type: 'string',
  },
}

export function listFiles ({ remote, disk, partition, path }) {
  return this.listBackupNgPartitionFiles(remote, disk, partition, path)
}

listFiles.permission = 'admin'

listFiles.params = {
  disk: {
    type: 'string',
  },
  partition: {
    type: 'string',
    optional: true,
  },
  path: {
    type: 'string',
  },
  remote: {
    type: 'string',
  },
}

async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
  const zipStream = await this.fetchBackupNgPartitionFiles(
    remote,
    disk,
    partition,
    paths
  )

  res.setHeader('content-disposition', 'attachment')
  res.setHeader('content-type', 'application/octet-stream')
  return zipStream
}

export async function fetchFiles (params) {
  const { paths } = params
  let filename = `restore_${safeDateFormat(new Date())}`
  if (paths.length === 1) {
    filename += `_${basename(paths[0])}`
  }
  filename += '.zip'

  return this.registerHttpRequest(handleFetchFiles, params, {
    suffix: encodeURI(`/${filename}`),
  }).then(url => ({ $getFrom: url }))
}

fetchFiles.permission = 'admin'

fetchFiles.params = {
  disk: {
    type: 'string',
  },
  partition: {
    optional: true,
    type: 'string',
  },
  paths: {
    items: { type: 'string' },
    minLength: 1,
    type: 'array',
  },
  remote: {
    type: 'string',
  },
}
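Since most createJob params are plain 'object' types, a concrete call shows the intent better than the schema. A hypothetical JSON-RPC invocation consistent with the params above — the IDs, the { id: … } pattern shapes, and the settings/schedules layouts are illustrative assumptions, not taken from this file:

await xo.call('backupNg.createJob', {
  name: 'nightly-delta', // optional
  mode: 'delta', // 'full' | 'delta'
  vms: { id: 'vm-uuid' }, // assumed pattern selecting the VMs to back up
  remotes: { id: 'remote-uuid' }, // assumed pattern selecting target remotes
  settings: { '': { retention: 7 } }, // assumed settings shape
  schedules: { tmpId: { cron: '0 3 * * *', enabled: true } }, // assumed shape
})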
@@ -242,7 +242,7 @@ emergencyShutdownHost.resolve = {
// -------------------------------------------------------------------

export function stats ({ host, granularity }) {
  return this.getXapiHostStats(host, granularity)
  return this.getXapiHostStats(host._xapiId, granularity)
}

stats.description = 'returns statistic of the host'

@@ -1,14 +1,14 @@
// FIXME so far, no acls for jobs

export async function getAll () {
  return /* await */ this.getAllJobs()
  return /* await */ this.getAllJobs('call')
}

getAll.permission = 'admin'
getAll.description = 'Gets all available jobs'

export async function get (id) {
  return /* await */ this.getJob(id)
  return /* await */ this.getJob(id, 'call')
}

get.permission = 'admin'

|
||||
export async function get ({ namespace }) {
|
||||
const logger = await this.getLogger(namespace)
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const logs = {}
|
||||
|
||||
logger
|
||||
.createReadStream()
|
||||
.on('data', data => {
|
||||
logs[data.key] = data.value
|
||||
})
|
||||
.on('end', () => {
|
||||
resolve(logs)
|
||||
})
|
||||
.on('error', reject)
|
||||
})
|
||||
export function get ({ namespace }) {
|
||||
return this.getLogs(namespace)
|
||||
}
|
||||
|
||||
get.description = 'returns logs list for one namespace'
|
||||
|
||||
@@ -99,11 +99,14 @@ set.params = {
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export function get ({ id }) {
|
||||
const { user } = this
|
||||
if (!user) {
|
||||
throw unauthorized()
|
||||
}
|
||||
|
||||
return this.getResourceSet(id)
|
||||
}
|
||||
|
||||
get.permission = 'admin'
|
||||
|
||||
get.params = {
|
||||
id: {
|
||||
type: 'string',
|
||||
|
||||
@@ -17,41 +17,44 @@ get.params = {
|
||||
id: { type: 'string' },
|
||||
}
|
||||
|
||||
export async function create ({ jobId, cron, enabled, name, timezone }) {
|
||||
return /* await */ this.createSchedule(this.session.get('user_id'), {
|
||||
job: jobId,
|
||||
export function create ({ cron, enabled, jobId, name, timezone }) {
|
||||
return this.createSchedule({
|
||||
cron,
|
||||
enabled,
|
||||
jobId,
|
||||
name,
|
||||
timezone,
|
||||
userId: this.session.get('user_id'),
|
||||
})
|
||||
}
|
||||
|
||||
create.permission = 'admin'
|
||||
create.description = 'Creates a new schedule'
|
||||
create.params = {
|
||||
jobId: { type: 'string' },
|
||||
cron: { type: 'string' },
|
||||
enabled: { type: 'boolean', optional: true },
|
||||
jobId: { type: 'string' },
|
||||
name: { type: 'string', optional: true },
|
||||
timezone: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
export async function set ({ id, jobId, cron, enabled, name, timezone }) {
|
||||
await this.updateSchedule(id, { job: jobId, cron, enabled, name, timezone })
|
||||
export async function set ({ cron, enabled, id, jobId, name, timezone }) {
|
||||
await this.updateSchedule({ cron, enabled, id, jobId, name, timezone })
|
||||
}
|
||||
|
||||
set.permission = 'admin'
|
||||
set.description = 'Modifies an existing schedule'
|
||||
set.params = {
|
||||
id: { type: 'string' },
|
||||
jobId: { type: 'string', optional: true },
|
||||
cron: { type: 'string', optional: true },
|
||||
enabled: { type: 'boolean', optional: true },
|
||||
id: { type: 'string' },
|
||||
jobId: { type: 'string', optional: true },
|
||||
name: { type: 'string', optional: true },
|
||||
timezone: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
async function delete_ ({ id }) {
|
||||
await this.removeSchedule(id)
|
||||
await this.deleteSchedule(id)
|
||||
}
|
||||
|
||||
delete_.permission = 'admin'
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
export async function enable ({ id }) {
|
||||
const schedule = await this.getSchedule(id)
|
||||
schedule.enabled = true
|
||||
await this.updateSchedule(id, schedule)
|
||||
}
|
||||
|
||||
enable.permission = 'admin'
|
||||
enable.description = "Enables a schedule to run it's job as scheduled"
|
||||
enable.params = {
|
||||
id: { type: 'string' },
|
||||
}
|
||||
|
||||
export async function disable ({ id }) {
|
||||
const schedule = await this.getSchedule(id)
|
||||
schedule.enabled = false
|
||||
await this.updateSchedule(id, schedule)
|
||||
}
|
||||
|
||||
disable.permission = 'admin'
|
||||
disable.description = 'Disables a schedule'
|
||||
disable.params = {
|
||||
id: { type: 'string' },
|
||||
}
|
||||
|
||||
export function getScheduleTable () {
|
||||
return this.scheduleTable
|
||||
}
|
||||
|
||||
disable.permission = 'admin'
|
||||
disable.description = 'Get a map of existing schedules enabled/disabled state'
|
||||
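With the schedules API reworked above (create() now takes a single object), a minimal call looks like the following sketch — the 'schedule' method namespace and the job ID are placeholders, not confirmed by this diff:

await xo.call('schedule.create', {
  jobId: '<job-id>',
  cron: '0 6 * * 0',
  enabled: true, // optional
  name: 'weekly report', // optional
  timezone: 'Europe/Paris', // optional
})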
@@ -189,6 +189,7 @@ export async function createNfs ({
  server,
  serverPath,
  nfsVersion,
  nfsOptions,
}) {
  const xapi = this.getXapi(host)

@@ -202,6 +203,11 @@ export async function createNfs ({
    deviceConfig.nfsversion = nfsVersion
  }

  // if NFS options given
  if (nfsVersion) {
    deviceConfig.options = nfsVersion
  }

  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
@@ -226,6 +232,7 @@ createNfs.params = {
  server: { type: 'string' },
  serverPath: { type: 'string' },
  nfsVersion: { type: 'string', optional: true },
  nfsOptions: { type: 'string', optional: true },
}

createNfs.resolve = {
@@ -241,7 +248,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
  const xapi = this.getXapi(host)

  const deviceConfig = {
    scsiId,
    SCSIid: scsiId,
  }

  const srRef = await xapi.call(
@@ -251,7 +258,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
    '0',
    nameLabel,
    nameDescription,
    'lvmoohba', // SR LVM over HBA
    'lvmohba', // SR LVM over HBA
    'user', // recommended by Citrix
    true,
    {}
@@ -366,7 +373,7 @@ export async function probeHba ({ host }) {
  let xml

  try {
    await xapi.call('SR.probe', host._xapiRef, 'type', {})
    await xapi.call('SR.probe', host._xapiRef, {}, 'lvmohba', {})

    throw new Error('the call above should have thrown an error')
  } catch (error) {
@@ -382,7 +389,7 @@ export async function probeHba ({ host }) {
    hbaDevices.push({
      hba: hbaDevice.hba.trim(),
      path: hbaDevice.path.trim(),
      scsciId: hbaDevice.SCSIid.trim(),
      scsiId: hbaDevice.SCSIid.trim(),
      size: hbaDevice.size.trim(),
      vendor: hbaDevice.vendor.trim(),
    })
@@ -487,8 +494,8 @@ export async function probeIscsiIqns ({

  // if we give user and password
  if (chapUser && chapPassword) {
    deviceConfig.chapUser = chapUser
    deviceConfig.chapPassword = chapPassword
    deviceConfig.chapuser = chapUser
    deviceConfig.chappassword = chapPassword
  }

  // if we give another port than default iSCSI
@@ -668,6 +675,34 @@ probeIscsiExists.resolve = {
  host: ['host', 'host', 'administrate'],
}

// -------------------------------------------------------------------
// This function helps to detect if this HBA already exists in XAPI
// It returns a table of SR UUID, empty if no existing connections

export async function probeHbaExists ({ host, scsiId }) {
  const xapi = this.getXapi(host)

  const deviceConfig = {
    SCSIid: scsiId,
  }

  const xml = parseXml(
    await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'lvmohba', {})
  )

  // get the UUID of SR connected to this LUN
  return ensureArray(xml.SRlist.SR).map(sr => ({ uuid: sr.UUID.trim() }))
}

probeHbaExists.params = {
  host: { type: 'string' },
  scsiId: { type: 'string' },
}

probeHbaExists.resolve = {
  host: ['host', 'host', 'administrate'],
}

// -------------------------------------------------------------------
// This function helps to detect if this NFS SR already exists in XAPI
// It returns a table of SR UUID, empty if no existing connections
@@ -803,3 +838,23 @@ getUnhealthyVdiChainsLength.params = {
getUnhealthyVdiChainsLength.resolve = {
  sr: ['id', 'SR', 'operate'],
}

// -------------------------------------------------------------------

export function stats ({ sr, granularity }) {
  return this.getXapiSrStats(sr._xapiId, granularity)
}

stats.description = 'returns statistic of the sr'

stats.params = {
  id: { type: 'string' },
  granularity: {
    type: 'string',
    optional: true,
  },
}

stats.resolve = {
  sr: ['id', 'SR', 'view'],
}

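As the comments above state, probeHbaExists only reports SRs already created on the probed LUN. A sketch of the two possible result shapes (the UUID value is illustrative):

// the LUN already carries an SR:
// → [{ uuid: '4aae68c8-0d22-4aaf-8d05-e5f675a67ab3' }]
// the LUN is unused:
// → []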
@@ -65,7 +65,11 @@ export async function copyVm ({ vm, sr }) {
    console.log('export delta VM...')
    const input = await srcXapi.exportDeltaVm(vm)
    console.log('import delta VM...')
    await tgtXapi.deleteVm(await tgtXapi.importDeltaVm(input, { srId: sr }))
    const { transferSize, vm: copyVm } = await tgtXapi.importDeltaVm(input, {
      srId: sr,
    })
    console.log('transfered size:', transferSize)
    await tgtXapi.deleteVm(copyVm)
  }
}

@@ -151,21 +151,21 @@ export async function create (params) {
    await Promise.all([
      params.share
        ? Promise.all(
          map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
            this.addAcl(subjectId, vm.id, 'admin')
            map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
              this.addAcl(subjectId, vm.id, 'admin')
            )
          )
        )
        : this.addAcl(user.id, vm.id, 'admin'),
      xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet),
    ])
  }

  for (const vifId of vm.VIFs) {
    const vif = this.getObject(vifId, 'VIF')
  for (const vif of xapiVm.$VIFs) {
    xapi.xo.addObject(vif)
    await this.allocIpAddresses(
      vifId,
      concat(vif.allowedIpv4Addresses, vif.allowedIpv6Addresses)
    ).catch(() => xapi.deleteVif(vif._xapiId))
      vif.$id,
      concat(vif.ipv4_allowed, vif.ipv6_allowed)
    ).catch(() => xapi.deleteVif(vif))
  }

  if (params.bootAfterCreate) {
@@ -323,6 +323,7 @@ create.resolve = {
async function delete_ ({
  delete_disks, // eslint-disable-line camelcase
  force,
  forceDeleteDefaultTemplate,
  vm,

  deleteDisks = delete_disks,
@@ -356,12 +357,19 @@ async function delete_ ({
  )

  // Update resource sets
  const resourceSet = xapi.xo.getData(vm._xapiId, 'resourceSet')
  if (resourceSet != null) {
  if (
    vm.type === 'VM' && // only regular VMs
    xapi.xo.getData(vm._xapiId, 'resourceSet') != null
  ) {
    ;this.setVmResourceSet(vm._xapiId, null)::ignoreErrors()
  }

  return xapi.deleteVm(vm._xapiId, deleteDisks, force)
  return xapi.deleteVm(
    vm._xapiId,
    deleteDisks,
    force,
    forceDeleteDefaultTemplate
  )
}

delete_.params = {
@@ -376,6 +384,11 @@ delete_.params = {
    optional: true,
    type: 'boolean',
  },

  forceDeleteDefaultTemplate: {
    optional: true,
    type: 'boolean',
  },
}
delete_.resolve = {
  vm: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate'],
@@ -411,7 +424,9 @@ insertCd.params = {

insertCd.resolve = {
  vm: ['id', 'VM', 'operate'],
  vdi: ['cd_id', 'VDI', 'view'],
  // Not compatible with resource sets.
  // FIXME: find a workaround.
  vdi: ['cd_id', 'VDI', ''],
}

// -------------------------------------------------------------------
@@ -582,6 +597,9 @@ set.params = {

  coresPerSocket: { type: ['string', 'number', 'null'], optional: true },

  // Emulate HVM C000 PCI device for Windows Update to fetch or update PV drivers
  hasVendorDevice: { type: 'boolean', optional: true },

  // Move the vm In to/Out of Self Service
  resourceSet: { type: ['string', 'null'], optional: true },

@@ -773,7 +791,7 @@ export function importDeltaBackup ({ sr, remote, filePath, mapVdisSrs }) {
    remoteId: remote,
    filePath,
    mapVdisSrs: mapVdisSrsXapi,
  })
  }).then(_ => _.vm)
}

importDeltaBackup.params = {
@@ -1054,12 +1072,12 @@ export function revert ({ snapshot, snapshotBefore }) {
}

revert.params = {
  id: { type: 'string' },
  snapshot: { type: 'string' },
  snapshotBefore: { type: 'boolean', optional: true },
}

revert.resolve = {
  snapshot: ['id', 'VM-snapshot', 'administrate'],
  snapshot: ['snapshot', 'VM-snapshot', 'administrate'],
}

// -------------------------------------------------------------------
@@ -1336,7 +1354,7 @@ detachPci.resolve = {
// -------------------------------------------------------------------

export function stats ({ vm, granularity }) {
  return this.getXapiVmStats(vm, granularity)
  return this.getXapiVmStats(vm._xapiId, granularity)
}

stats.description = 'returns statistics about the VM'

@@ -1,7 +1,6 @@
import getStream from 'get-stream'
import { forEach } from 'lodash'

import { streamToBuffer } from '../utils'

// ===================================================================

export function clean () {
@@ -42,7 +41,9 @@ function handleGetAllObjects (req, res, { filter, limit }) {

export function getAllObjects ({ filter, limit, ndjson = false }) {
  return ndjson
    ? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then($getFrom => ({ $getFrom }))
    ? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then(
      $getFrom => ({ $getFrom })
    )
    : this.getObjects({ filter, limit })
}

@@ -59,7 +60,7 @@ getAllObjects.params = {
export async function importConfig () {
  return {
    $sendTo: await this.registerHttpRequest(async (req, res) => {
      await this.importConfig(JSON.parse(await streamToBuffer(req)))
      await this.importConfig(JSON.parse(await getStream.buffer(req)))

      res.end('config successfully imported')
    }),

@@ -665,7 +665,9 @@ export const createSR = defer(async function (

  CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 0 }

  const tmpBoundObjectId = srs.join(',')
  const tmpBoundObjectId = `tmp_${srs.join(',')}_${Math.random()
    .toString(32)
    .slice(2)}`
  const license = await this.createBoundXosanTrialLicense({
    boundObjectId: tmpBoundObjectId,
  })

@@ -1,6 +1,6 @@
import { EventEmitter } from 'events'

import { createRawObject, noop } from './utils'
import { noop } from './utils'

// ===================================================================

@@ -8,7 +8,7 @@ export default class Connection extends EventEmitter {
  constructor () {
    super()

    this._data = createRawObject()
    this._data = { __proto__: null }
  }

  // Close the connection.

@@ -8,7 +8,7 @@ describe('debounce()', () => {
  let i

  class Foo {
    @debounce(1e1)
    @debounce(10)
    foo () {
      ++i
    }
@@ -18,22 +18,28 @@ describe('debounce()', () => {
    i = 0
  })

  it('works', done => {
    const foo = new Foo()
  it('works', () => {
    const savedNow = Date.now
    try {
      const now = Date.now()
      const mockDate = jest.fn()
      Date.now = mockDate
      const foo = new Foo()
      expect(i).toBe(0)

      expect(i).toBe(0)
      mockDate.mockReturnValueOnce(now)
      foo.foo()
      expect(i).toBe(1)

      foo.foo()
      expect(i).toBe(1)
      mockDate.mockReturnValueOnce(now + 2)
      foo.foo()
      expect(i).toBe(1)

      foo.foo()
      expect(i).toBe(1)

      setTimeout(() => {
      mockDate.mockReturnValueOnce(now + 2 + 10)
      foo.foo()
      expect(i).toBe(2)

      done()
    }, 2e1)
    } finally {
      Date.now = savedNow
    }
  })
})

@@ -3,16 +3,17 @@ import bind from 'lodash/bind'
import blocked from 'blocked'
import createExpress from 'express'
import createLogger from 'debug'
import eventToPromise from 'event-to-promise'
import has from 'lodash/has'
import helmet from 'helmet'
import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
import WebSocket from 'ws'
import { compile as compilePug } from 'pug'
import { createServer as createProxyServer } from 'http-proxy'
import { fromEvent } from 'promise-toolbox'
import { join as joinPath } from 'path'

import JsonRpcPeer from 'json-rpc-peer'
@@ -22,7 +23,6 @@ import { ensureDir, readdir, readFile } from 'fs-extra'
import WebServer from 'http-server-plus'
import Xo from './xo'
import {
  createRawObject,
  forEach,
  isArray,
  isFunction,
@@ -103,7 +103,7 @@ function createExpressApp () {
}

async function setUpPassport (express, xo) {
  const strategies = createRawObject()
  const strategies = { __proto__: null }
  xo.registerPassportStrategy = strategy => {
    passport.use(strategy)

@@ -333,7 +333,7 @@ async function makeWebServerListen (
}

async function createWebServer ({ listen, listenOptions }) {
  const webServer = new WebServer()
  const webServer = stoppable(new WebServer())

  await Promise.all(
    mapToArray(listen, opts =>
@@ -538,9 +538,14 @@ export default async function main (args) {

  {
    const debug = createLogger('xo:perf')
    blocked(ms => {
      debug('blocked for %sms', ms | 0)
    })
    blocked(
      ms => {
        debug('blocked for %sms', ms | 0)
      },
      {
        threshold: 50,
      }
    )
  }

  const config = await loadConfiguration()
@@ -566,7 +571,7 @@ export default async function main (args) {
  const xo = new Xo(config)

  // Register web server close on XO stop.
  xo.on('stop', () => pFromCallback(cb => webServer.close(cb)))
  xo.on('stop', () => pFromCallback(cb => webServer.stop(cb)))

  // Connects to all registered servers.
  await xo.start()
@@ -645,7 +650,7 @@ export default async function main (args) {
    })
  })

  await eventToPromise(xo, 'stopped')
  await fromEvent(xo, 'stopped')

  debug('bye :-)')
}

@@ -1,186 +0,0 @@
import { BaseError } from 'make-error'
import { createPredicate } from 'value-matcher'
import { timeout } from 'promise-toolbox'
import { assign, filter, find, isEmpty, map, mapValues } from 'lodash'

import { crossProduct } from './math'
import { asyncMap, serializeError, thunkToArray } from './utils'

export class JobExecutorError extends BaseError {}
export class UnsupportedJobType extends JobExecutorError {
  constructor (job) {
    super('Unknown job type: ' + job.type)
  }
}
export class UnsupportedVectorType extends JobExecutorError {
  constructor (vector) {
    super('Unknown vector type: ' + vector.type)
  }
}

// ===================================================================

const paramsVectorActionsMap = {
  extractProperties ({ mapping, value }) {
    return mapValues(mapping, key => value[key])
  },
  crossProduct ({ items }) {
    return thunkToArray(
      crossProduct(map(items, value => resolveParamsVector.call(this, value)))
    )
  },
  fetchObjects ({ pattern }) {
    const objects = filter(this.xo.getObjects(), createPredicate(pattern))
    if (isEmpty(objects)) {
      throw new Error('no objects match this pattern')
    }
    return objects
  },
  map ({ collection, iteratee, paramName = 'value' }) {
    return map(resolveParamsVector.call(this, collection), value => {
      return resolveParamsVector.call(this, {
        ...iteratee,
        [paramName]: value,
      })
    })
  },
  set: ({ values }) => values,
}

export function resolveParamsVector (paramsVector) {
  const visitor = paramsVectorActionsMap[paramsVector.type]
  if (!visitor) {
    throw new Error(`Unsupported function '${paramsVector.type}'.`)
  }

  return visitor.call(this, paramsVector)
}

// ===================================================================

export default class JobExecutor {
  constructor (xo) {
    this.xo = xo

    // The logger is not available until Xo has started.
    xo.on('start', () =>
      xo.getLogger('jobs').then(logger => {
        this._logger = logger
      })
    )
  }

  async exec (job) {
    const runJobId = this._logger.notice(`Starting execution of ${job.id}.`, {
      event: 'job.start',
      userId: job.userId,
      jobId: job.id,
      key: job.key,
    })

    try {
      if (job.type === 'call') {
        const execStatus = await this._execCall(job, runJobId)

        this.xo.emit('job:terminated', execStatus)
      } else {
        throw new UnsupportedJobType(job)
      }

      this._logger.notice(`Execution terminated for ${job.id}.`, {
        event: 'job.end',
        runJobId,
      })
    } catch (error) {
      this._logger.error(`The execution of ${job.id} has failed.`, {
        event: 'job.end',
        runJobId,
        error: serializeError(error),
      })

      throw error
    }
  }

  async _execCall (job, runJobId) {
    const { paramsVector } = job
    const paramsFlatVector = paramsVector
      ? resolveParamsVector.call(this, paramsVector)
      : [{}] // One call with no parameters

    const connection = this.xo.createUserConnection()

    connection.set('user_id', job.userId)

    const schedule = find(await this.xo.getAllSchedules(), { job: job.id })

    const execStatus = {
      calls: {},
      runJobId,
      start: Date.now(),
      timezone: schedule !== undefined ? schedule.timezone : undefined,
    }

    await asyncMap(paramsFlatVector, params => {
      const runCallId = this._logger.notice(
        `Starting ${job.method} call. (${job.id})`,
        {
          event: 'jobCall.start',
          runJobId,
          method: job.method,
          params,
        }
      )

      const call = (execStatus.calls[runCallId] = {
        method: job.method,
        params,
        start: Date.now(),
      })
      let promise = this.xo.callApiMethod(
        connection,
        job.method,
        assign({}, params)
      )
      if (job.timeout) {
        promise = promise::timeout(job.timeout)
      }

      return promise.then(
        value => {
          this._logger.notice(
            `Call ${job.method} (${runCallId}) is a success. (${job.id})`,
            {
              event: 'jobCall.end',
              runJobId,
              runCallId,
              returnedValue: value,
            }
          )

          call.returnedValue = value
          call.end = Date.now()
        },
        reason => {
          this._logger.notice(
            `Call ${job.method} (${runCallId}) has failed. (${job.id})`,
            {
              event: 'jobCall.end',
              runJobId,
              runCallId,
              error: serializeError(reason),
            }
          )

          call.error = reason
          call.end = Date.now()
        }
      )
    })

    connection.close()
    execStatus.end = Date.now()

    return execStatus
  }
}
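Although this executor file is being removed, the params-vector mechanism is easiest to grasp by example. Based purely on the actions table above (inputs hypothetical; ctx only matters for fetchObjects):

resolveParamsVector.call(ctx, {
  type: 'set',
  values: [{ id: 'a' }, { id: 'b' }],
})
// → [{ id: 'a' }, { id: 'b' }]
resolveParamsVector.call(ctx, {
  type: 'extractProperties',
  mapping: { id: 'uuid' },
  value: { uuid: '42', name: 'foo' },
})
// → { id: '42' }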
@@ -1,16 +1,15 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
import { isArray, map } from 'lodash'

// ===================================================================

const parse = createParser({
  keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => (fields, ...args) =>
  execa
    .stdout(command, [
const makeFunction = command => async (fields, ...args) => {
  return splitLines(
    await execa.stdout(command, [
      '--noheading',
      '--nosuffix',
      '--nameprefixes',
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
      String(fields),
      ...args,
    ])
    .then(stdout =>
      map(
        splitLines(stdout),
        isArray(fields)
          ? parse
          : line => {
            const data = parse(line)
            return data[fields]
          }
      )
    )
  ).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')

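The rewritten makeFunction still feeds each stdout line through parse (parse-pairs with a keyTransform that strips the LVM2_ prefix and lowercases). A sketch of what one line becomes — the sample values are hypothetical:

// stdout line: LVM2_LV_NAME='data' LVM2_LV_SIZE='10737418240'
// parse(line) → { lv_name: 'data', lv_size: '10737418240' }
lvs(['lv_name', 'lv_size'], 'vg0') // → [{ lv_name: 'data', lv_size: '10737418240' }]
lvs('lv_name', 'vg0') // → ['data']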
@@ -1,43 +0,0 @@
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'

import { parseProp } from './utils'

// ===================================================================

export default class Job extends Model {}

export class Jobs extends Collection {
  get Model () {
    return Job
  }

  async create (job) {
    // Serializes.
    job.paramsVector = JSON.stringify(job.paramsVector)
    return /* await */ this.add(new Job(job))
  }

  async save (job) {
    // Serializes.
    job.paramsVector = JSON.stringify(job.paramsVector)
    return /* await */ this.update(job)
  }

  async get (properties) {
    const jobs = await super.get(properties)

    // Deserializes.
    forEach(jobs, job => {
      job.paramsVector = parseProp('job', job, 'paramsVector', {})

      const { timeout } = job
      if (timeout !== undefined) {
        job.timeout = +timeout
      }
    })

    return jobs
  }
}
Some files were not shown because too many files have changed in this diff.