# Compare commits

Comparing `value-matc...xo-web-v5.` — 218 commits
| SHA1 |
|---|
| 9e2404a0d7 |
| 7dd84d1518 |
| d800db5d09 |
| 2714ccff38 |
| 1d493e411b |
| 2a0c222f2d |
| 641d68de0e |
| 2dd0fd660b |
| bb5441c7bc |
| eeea9e662b |
| 8d4874e240 |
| a8ba4a1a8e |
| 0c027247ec |
| 164cb39c1b |
| 52503de645 |
| 83b8b5de61 |
| 3e326c4e62 |
| a6b0690416 |
| dcd007c5c7 |
| eb090e4874 |
| 4b716584f7 |
| 4bc348f39f |
| 9c75992fe4 |
| 4bb2702ac5 |
| ea8133cb41 |
| fc40c7b03d |
| 7fe5b66fdb |
| 0f1d052493 |
| 56a182f795 |
| e8da1b943b |
| 3913b0eba1 |
| 7990e45095 |
| a7068ec166 |
| 55b35ac0cf |
| a251f8ca75 |
| 172ce2c7a1 |
| 3cef668a75 |
| e6deb29070 |
| 51609d45a2 |
| 5cb6dc6d92 |
| c5174a61b7 |
| 93e987982c |
| fc421428fd |
| 7400bd657a |
| da62cba3f8 |
| 461cc7e547 |
| b898ed4785 |
| 149530e73f |
| 7e627c953e |
| bc86984f19 |
| e40f3acdd4 |
| 63d93224e0 |
| c87356c319 |
| 74f4a83aea |
| e67038a04d |
| 1fa73b57a2 |
| 73c746fdd3 |
| ab1413b741 |
| c087eaf229 |
| 8b9f9ffa3e |
| a83fa90d87 |
| 505f06c1d8 |
| 2ac1093543 |
| b3d8ce2041 |
| b47789bf82 |
| 0a5e1a9bce |
| f333679319 |
| 20d3faa306 |
| cf11ed0830 |
| acd390ac42 |
| 8a2fbe3ab5 |
| 7a6e7ec153 |
| 7d90346c91 |
| abb5193ced |
| 52e845834e |
| c1c17fad44 |
| d7b4025893 |
| 934356571c |
| 738d98eb42 |
| 7e689076d8 |
| 0b9d031965 |
| 53f470518b |
| 664d648435 |
| 0d718bd632 |
| ed5e0c3509 |
| 20d5047b55 |
| 4cfe3ec06e |
| 87664ff16a |
| adf278fc83 |
| a4d0fa62d2 |
| ff59d091f1 |
| 4cac99d79a |
| d1a046279d |
| cb9fa5c42b |
| 05f9e6895b |
| 63b5ee6f96 |
| 36d2de049f |
| 86b0d5e2b7 |
| d34f641130 |
| 39d7b4c7bd |
| ad0d4156fb |
| 80187e2789 |
| 89e25c9b81 |
| ca51d59815 |
| 433f445e99 |
| 474a765e1b |
| 7d4b17380d |
| b58b1d94cd |
| 16e7257e3b |
| ca1a46f980 |
| 596bd12f59 |
| 301ab65c01 |
| 35f210e074 |
| c239b518e0 |
| f45935aa44 |
| 782505b292 |
| 1368e3b86d |
| ab9c24401e |
| 831f4e48d1 |
| f5511449af |
| 80c1e39b53 |
| 3ce4e86784 |
| fb617418bb |
| 9fb0f793b2 |
| 3b21a097ab |
| ef09a42a89 |
| 74d8f2a859 |
| 48910f9c0f |
| 788a1accbd |
| b254e7e852 |
| e288fa1b8a |
| eb9ec68494 |
| 10ab4f2d79 |
| b1986dc275 |
| 831e36ae5f |
| 77a2d37d98 |
| 37b90e25dc |
| 41f16846b6 |
| 3e89c62e72 |
| b7d3762c06 |
| 481bc9430a |
| 13f2470887 |
| 0308fe4e6e |
| 197273193e |
| e4b11a793b |
| 927d3135c4 |
| aa533c20d6 |
| 7fd615525a |
| 6abf3fc0af |
| 6bb0929822 |
| feebc04e55 |
| 2d406cd7c1 |
| 788bfe632f |
| 1149102f37 |
| 8bd949f618 |
| 489b142a66 |
| cbbbb6da4f |
| 6701c7e3af |
| ecd460a991 |
| b4d7648ffe |
| eb3dfb0f30 |
| 2b9ba69480 |
| 8f784162ea |
| a2ab64b142 |
| 052817ccbf |
| 48b2297bc1 |
| e76a0ad4bd |
| baf6d30348 |
| 7d250dd90b |
| efaabb02e8 |
| 0c3b98d451 |
| 28d1539ea6 |
| 8ad02d2d51 |
| 1947a066e0 |
| d99e643634 |
| 65e1ac2ef9 |
| 64a768090f |
| 488eed046e |
| dccddd78a6 |
| 3c247abcf9 |
| db795e91fd |
| f060f56c93 |
| 51be573f5e |
| 4257cbb618 |
| e25d6b712d |
| b499d60130 |
| 68e06303a4 |
| 60085798f2 |
| c62cab39f1 |
| 30483ab2d9 |
| c38c716616 |
| ded1127d64 |
| 38d6130e89 |
| ee47e40d1a |
| 80e66415d7 |
| 81e6372070 |
| dbfbd42d29 |
| e0d34b1747 |
| 9a8f9dd1d7 |
| 75521f8757 |
| 11d4cb2f04 |
| d90cb09b56 |
| a02d393457 |
| 01a5963947 |
| 7ef314d9f4 |
| 2ff25d1f61 |
| ede12b6732 |
| 8a010f62fd |
| 51da4a7e70 |
| fd2580f5da |
| c5fdab7d47 |
| ae094438b1 |
| 3e5af9e894 |
| 10093afb91 |
| 58032738b9 |
| 89cbbaeeea |
| 5ca08eb400 |
| fad049d2ac |
```diff
@@ -2,13 +2,21 @@ module.exports = {
   extends: ['standard', 'standard-jsx'],
   globals: {
     __DEV__: true,
+    $Dict: true,
+    $Diff: true,
+    $Exact: true,
+    $Keys: true,
+    $PropertyType: true,
+    $Shape: true,
   },
   parser: 'babel-eslint',
   rules: {
     'comma-dangle': ['error', 'always-multiline'],
     indent: 'off',
     'no-var': 'error',
+    'node/no-extraneous-import': 'error',
+    'node/no-extraneous-require': 'error',
     'prefer-const': 'error',
     'react/jsx-indent': 'off',
   },
 }
```
```diff
@@ -8,6 +8,7 @@
 [lints]
 
 [options]
 esproposal.decorators=ignore
+include_warnings=true
 module.use_strict=true
```
```diff
@@ -1,4 +1,5 @@
 module.exports = {
   semi: false,
   singleQuote: true,
+  trailingComma: 'es5',
 }
```
```diff
@@ -7,6 +7,11 @@ node_js:
 # Use containers.
 # http://docs.travis-ci.com/user/workers/container-based-infrastructure/
 sudo: false
+addons:
+  apt:
+    packages:
+      - qemu-utils
+      - blktap-utils
 
 before_install:
   - curl -o- -L https://yarnpkg.com/install.sh | bash
@@ -14,3 +19,7 @@ before_install:
 
 cache:
   yarn: true
+
+script:
+  - yarn run test
+  - yarn run test-integration
```
**@xen-orchestra/babel-config/index.js** (new file, 47 lines)

```js
'use strict'

const PLUGINS_RE = /^(?:@babel\/plugin-.+|babel-plugin-lodash)$/
const PRESETS_RE = /^@babel\/preset-.+$/

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

module.exports = function (pkg, plugins, presets) {
  plugins === undefined && (plugins = {})

  presets === undefined && (presets = {})
  presets['@babel/preset-env'] = {
    debug: !__TEST__,
    loose: true,
    shippedProposals: true,
    targets: __PROD__
      ? (() => {
          let node = (pkg.engines || {}).node
          if (node !== undefined) {
            const trimChars = '^=>~'
            while (trimChars.includes(node[0])) {
              node = node.slice(1)
            }
            return { node: node }
          }
        })()
      : { browsers: '', node: 'current' },
    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
  }

  Object.keys(pkg.devDependencies || {}).forEach(name => {
    if (!(name in presets) && PLUGINS_RE.test(name)) {
      plugins[name] = {}
    } else if (!(name in presets) && PRESETS_RE.test(name)) {
      presets[name] = {}
    }
  })

  return {
    comments: !__PROD__,
    ignore: __TEST__ ? undefined : [/\.spec\.js$/],
    plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
    presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
  }
}
```
**@xen-orchestra/babel-config/package.json** (new file, 11 lines)

```json
{
  "private": true,
  "name": "@xen-orchestra/babel-config",
  "version": "0.0.0",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  }
}
```
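The factory above turns a package's `package.json` into a complete Babel options object: `@babel/preset-env` is always configured (targeting `engines.node` for production builds), and any `@babel/plugin-*`, `babel-plugin-lodash`, or `@babel/preset-*` listed in `devDependencies` is picked up automatically. A minimal consumption sketch, mirroring the `.babelrc.js` rewrites later in this compare — the optional override arguments shown are hypothetical examples, not part of the diff:

```js
// <package>/.babelrc.js — delegate Babel configuration to the shared factory.
const configure = require('../../@xen-orchestra/babel-config')

module.exports = configure(
  require('./package.json'),
  // optional per-package plugin/preset overrides (illustrative values only)
  { 'babel-plugin-lodash': {} },
  { '@babel/preset-flow': {} }
)
```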
```diff
@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-          let node = (pkg.engines || {}).node
-          if (node !== undefined) {
-            const trimChars = '^=>~'
-            while (trimChars.includes(node[0])) {
-              node = node.slice(1)
-            }
-            return { node: node }
-          }
-        })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@xen-orchestra/cron",
-  "version": "1.0.0",
+  "version": "1.0.3",
   "license": "ISC",
   "description": "Focused, well maintained, cron parser/scheduler",
   "keywords": [
@@ -14,7 +14,7 @@
     "scheduling",
     "task"
   ],
-  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
   "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
   "repository": {
     "type": "git",
@@ -38,13 +38,13 @@
   },
   "dependencies": {
     "lodash": "^4.17.4",
-    "luxon": "^0.5.2"
+    "moment-timezone": "^0.5.14"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
   },
```
@@ -1,4 +1,4 @@
|
||||
import { DateTime } from 'luxon'
|
||||
import moment from 'moment-timezone'
|
||||
|
||||
import next from './next'
|
||||
import parse from './parse'
|
||||
@@ -41,7 +41,10 @@ class Job {
|
||||
class Schedule {
|
||||
constructor (pattern, zone = 'utc') {
|
||||
this._schedule = parse(pattern)
|
||||
this._dateTimeOpts = { zone }
|
||||
this._createDate =
|
||||
zone.toLowerCase() === 'utc'
|
||||
? moment.utc
|
||||
: zone === 'local' ? moment : () => moment.tz(zone)
|
||||
}
|
||||
|
||||
createJob (fn) {
|
||||
@@ -51,15 +54,15 @@ class Schedule {
|
||||
next (n) {
|
||||
const dates = new Array(n)
|
||||
const schedule = this._schedule
|
||||
let date = DateTime.fromObject(this._dateTimeOpts)
|
||||
let date = this._createDate()
|
||||
for (let i = 0; i < n; ++i) {
|
||||
dates[i] = (date = next(schedule, date)).toJSDate()
|
||||
dates[i] = (date = next(schedule, date)).toDate()
|
||||
}
|
||||
return dates
|
||||
}
|
||||
|
||||
_nextDelay () {
|
||||
const now = DateTime.fromObject(this._dateTimeOpts)
|
||||
const now = this._createDate()
|
||||
return next(this._schedule, now) - now
|
||||
}
|
||||
|
||||
|
||||
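The public surface of `Schedule` is unchanged by the Luxon→moment-timezone swap: `next(n)` still returns plain JS `Date`s (now via `moment#toDate()`), and `_nextDelay()` still relies on date subtraction coercing to milliseconds. A hedged usage sketch — it assumes the package's `createSchedule` entry point and a `start()` method on the returned job, neither of which appears in this hunk, and the pattern/zone values are examples:

```js
import { createSchedule } from '@xen-orchestra/cron' // assumed entry point

// Every day at midnight, Paris time (pattern and zone are example values).
const schedule = createSchedule('0 0 * * *', 'Europe/Paris')

console.log(schedule.next(3)) // the three upcoming runs, as JS Dates

const job = schedule.createJob(() => console.log('tick'))
job.start() // assumed Job API
```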
```diff
@@ -1,10 +1,10 @@
+import moment from 'moment-timezone'
 import sortedIndex from 'lodash/sortedIndex'
-import { DateTime } from 'luxon'
 
 const NEXT_MAPPING = {
   month: { year: 1 },
-  day: { month: 1 },
-  weekday: { week: 1 },
+  date: { month: 1 },
+  day: { week: 1 },
   hour: { day: 1 },
   minute: { hour: 1 },
 }
@@ -13,38 +13,37 @@ const getFirst = values => (values !== undefined ? values[0] : 0)
 
 const setFirstAvailable = (date, unit, values) => {
   if (values === undefined) {
-    return date
+    return
   }
 
   const curr = date.get(unit)
   const next = values[sortedIndex(values, curr) % values.length]
   if (curr === next) {
-    return date
+    return
   }
 
-  const newDate = date.set({ [unit]: next })
-  return newDate > date ? newDate : newDate.plus(NEXT_MAPPING[unit])
+  const timestamp = +date
+  date.set(unit, next)
+  if (timestamp > +date) {
+    date.add(NEXT_MAPPING[unit])
+  }
+  return true
 }
 
 // returns the next run, after the passed date
 export default (schedule, fromDate) => {
-  let date = fromDate
+  let date = moment(fromDate)
     .set({
       second: 0,
       millisecond: 0,
     })
-    .plus({ minute: 1 })
+    .add({ minute: 1 })
 
   const { minute, hour, dayOfMonth, month, dayOfWeek } = schedule
-  date = setFirstAvailable(date, 'minute', minute)
+  setFirstAvailable(date, 'minute', minute)
 
-  let tmp
-
-  tmp = setFirstAvailable(date, 'hour', hour)
-  if (tmp !== date) {
-    date = tmp.set({
-      minute: getFirst(minute),
-    })
+  if (setFirstAvailable(date, 'hour', hour)) {
+    date.set('minute', getFirst(minute))
   }
 
   let loop
@@ -52,30 +51,30 @@ export default (schedule, fromDate) => {
   do {
     loop = false
 
-    tmp = setFirstAvailable(date, 'month', month)
-    if (tmp !== date) {
-      date = tmp.set({
-        day: 1,
+    if (setFirstAvailable(date, 'month', month)) {
+      date.set({
+        date: 1,
         hour: getFirst(hour),
         minute: getFirst(minute),
       })
     }
 
+    let newDate = date.clone()
     if (dayOfMonth === undefined) {
       if (dayOfWeek !== undefined) {
-        tmp = setFirstAvailable(date, 'weekday', dayOfWeek)
+        setFirstAvailable(newDate, 'day', dayOfWeek)
       }
     } else if (dayOfWeek === undefined) {
-      tmp = setFirstAvailable(date, 'day', dayOfMonth)
+      setFirstAvailable(newDate, 'date', dayOfMonth)
     } else {
-      tmp = DateTime.min(
-        setFirstAvailable(date, 'day', dayOfMonth),
-        setFirstAvailable(date, 'weekday', dayOfWeek)
-      )
+      const dateDay = newDate.clone()
+      setFirstAvailable(dateDay, 'date', dayOfMonth)
+      setFirstAvailable(newDate, 'day', dayOfWeek)
+      newDate = moment.min(dateDay, newDate)
     }
-    if (tmp !== date) {
-      loop = tmp.month !== date.month
-      date = tmp.set({
+    if (+date !== +newDate) {
+      loop = date.month() !== newDate.month()
+      date = newDate.set({
         hour: getFirst(hour),
         minute: getFirst(minute),
       })
```
```diff
@@ -1,17 +1,15 @@
 /* eslint-env jest */
 
 import mapValues from 'lodash/mapValues'
-import { DateTime } from 'luxon'
+import moment from 'moment-timezone'
 
 import next from './next'
 import parse from './parse'
 
-const N = (pattern, fromDate = '2018-04-09T06:25') =>
-  next(parse(pattern), DateTime.fromISO(fromDate, { zone: 'utc' })).toISO({
-    includeOffset: false,
-    suppressMilliseconds: true,
-    suppressSeconds: true,
-  })
+const N = (pattern, fromDate = '2018-04-09T06:25') => {
+  const iso = next(parse(pattern), moment.utc(fromDate)).toISOString()
+  return iso.slice(0, iso.lastIndexOf(':'))
+}
 
 describe('next()', () => {
   mapValues(
@@ -43,4 +41,8 @@ describe('next()', () => {
       'no solutions found for this schedule'
     )
   })
+
+  it('select the first sunday of the month', () => {
+    expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
+  })
 })
```
```diff
@@ -173,18 +173,13 @@ export default createParser({
     {
       aliases: 'jan feb mar apr may jun jul aug sep oct nov dec'.split(' '),
       name: 'month',
-      range: [1, 12],
-
-      // this function is applied to numeric entries (not steps)
-      //
-      // currently parse month 0-11
-      post: value => value + 1,
+      range: [0, 11],
     },
     {
-      aliases: 'mon tue wen thu fri sat sun'.split(' '),
+      aliases: 'sun mon tue wen thu fri sat'.split(' '),
       name: 'dayOfWeek',
-      post: value => (value === 0 ? 7 : value),
-      range: [1, 7],
+      post: value => (value === 7 ? 0 : value),
+      range: [0, 6],
     },
   ],
   presets: {
```
```diff
@@ -8,22 +8,22 @@ describe('parse()', () => {
       minute: [0],
       hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
       dayOfMonth: [1, 11, 21, 31],
-      month: [1, 3, 5, 8, 11],
+      month: [0, 2, 4, 7, 10],
     })
   })
 
   it('correctly parse months', () => {
     expect(parse('* * * 0,11 *')).toEqual({
-      month: [1, 12],
+      month: [0, 11],
     })
     expect(parse('* * * jan,dec *')).toEqual({
-      month: [1, 12],
+      month: [0, 11],
     })
   })
 
   it('correctly parse days', () => {
     expect(parse('* * * * mon,sun')).toEqual({
-      dayOfWeek: [1, 7],
+      dayOfWeek: [0, 1],
     })
   })
 
@@ -40,10 +40,10 @@ describe('parse()', () => {
 
   it('dayOfWeek: 0 and 7 bind to sunday', () => {
     expect(parse('* * * * 0')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
     })
     expect(parse('* * * * 7')).toEqual({
-      dayOfWeek: [7],
+      dayOfWeek: [0],
    })
  })
 })
```
```diff
@@ -1,121 +1,184 @@
 # ChangeLog
 
+## **5.17.0** (2018-03-02)
+
+### Enhancements
+
+- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
+- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
+- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
+- Basic backup: snapshots names [#2668](https://github.com/vatesfr/xen-orchestra/issues/2668)
+- Change placement of "share" button for self [#2663](https://github.com/vatesfr/xen-orchestra/issues/2663)
+- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
+- Backup report for VDI chain status [#2639](https://github.com/vatesfr/xen-orchestra/issues/2639)
+- [Dashboard/Health] Control domain VDIs should includes snapshots [#2634](https://github.com/vatesfr/xen-orchestra/issues/2634)
+- Do not count VM-snapshot in self quota [#2626](https://github.com/vatesfr/xen-orchestra/issues/2626)
+- [xo-web] Backup logs [#2618](https://github.com/vatesfr/xen-orchestra/issues/2618)
+- [VM/Snapshots] grouped deletion [#2595](https://github.com/vatesfr/xen-orchestra/issues/2595)
+- [Backups] add a new state for a VM: skipped [#2591](https://github.com/vatesfr/xen-orchestra/issues/2591)
+- Set a self-service VM at "share" after creation [#2589](https://github.com/vatesfr/xen-orchestra/issues/2589)
+- [Backup logs] Improve Unhealthy VDI Chain message [#2586](https://github.com/vatesfr/xen-orchestra/issues/2586)
+- [SortedTable] Put sort criteria in URL like the filter [#2584](https://github.com/vatesfr/xen-orchestra/issues/2584)
+- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
+- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
+- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
+
+### Bugs
+
+- Limit VDI export concurrency [#2672](https://github.com/vatesfr/xen-orchestra/issues/2672)
+- Select is broken outside dev mode [#2645](https://github.com/vatesfr/xen-orchestra/issues/2645)
+- "New" XOSAN automatically register the user [#2625](https://github.com/vatesfr/xen-orchestra/issues/2625)
+- [VM/Advanced] Error on resource set change should not be hidden [#2620](https://github.com/vatesfr/xen-orchestra/issues/2620)
+- misspelled word [#2606](https://github.com/vatesfr/xen-orchestra/issues/2606)
+- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
+
+## **5.16.0** (2018-01-31)
+
+### Enhancements
+
+- Use @xen-orchestra/cron everywhere [#2616](https://github.com/vatesfr/xen-orchestra/issues/2616)
+- [SortedTable] Possibility to specify grouped/individual actions together [#2596](https://github.com/vatesfr/xen-orchestra/issues/2596)
+- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
+- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
+- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
+- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
+- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
+- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
+- Two factor auth [#1897](https://github.com/vatesfr/xen-orchestra/issues/1897)
+- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
+- Self Service User - User don't have quota in his dashboard [#1538](https://github.com/vatesfr/xen-orchestra/issues/1538)
+- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
+- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
+- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
+
+### Bugs
+
+- [cron] toJSDate is not a function [#2661](https://github.com/vatesfr/xen-orchestra/issues/2661)
+- [Delta backup] Merge should not fail when delta contains no data [#2635](https://github.com/vatesfr/xen-orchestra/issues/2635)
+- Select issues [#2590](https://github.com/vatesfr/xen-orchestra/issues/2590)
+- Fix selects display [#2575](https://github.com/vatesfr/xen-orchestra/issues/2575)
+- [SortedTable] Stuck when displaying last page [#2569](https://github.com/vatesfr/xen-orchestra/issues/2569)
+- [vm/network] Duplicate key error [#2553](https://github.com/vatesfr/xen-orchestra/issues/2553)
+- Jobs vm.revert failing all the time [#2498](https://github.com/vatesfr/xen-orchestra/issues/2498)
+- TZ selector is not used for backup schedule preview [#2464](https://github.com/vatesfr/xen-orchestra/issues/2464)
+- Remove filter in VM/network view [#2548](https://github.com/vatesfr/xen-orchestra/issues/2548)
+
 ## **5.15.0** (2017-12-29)
 
 ### Enhancements
 
-* VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
-* Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
-* New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
-* Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
-* [Self Service] Do not use placehodlers to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
-* Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
-* Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
-* Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
-* Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
-* [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
-* Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
+- VDI resize online method removed in 7.3 [#2542](https://github.com/vatesfr/xen-orchestra/issues/2542)
+- Smart replace VDI.pool_migrate removed from XenServer 7.3 Free [#2541](https://github.com/vatesfr/xen-orchestra/issues/2541)
+- New memory constraints in XenServer 7.3 [#2540](https://github.com/vatesfr/xen-orchestra/issues/2540)
+- Link to Settings/Logs for admins in error notifications [#2516](https://github.com/vatesfr/xen-orchestra/issues/2516)
+- [Self Service] Do not use placehodlers to describe inputs [#2509](https://github.com/vatesfr/xen-orchestra/issues/2509)
+- Obfuscate password in log in LDAP plugin test [#2506](https://github.com/vatesfr/xen-orchestra/issues/2506)
+- Log rotation [#2492](https://github.com/vatesfr/xen-orchestra/issues/2492)
+- Continuous Replication TAG [#2473](https://github.com/vatesfr/xen-orchestra/issues/2473)
+- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
+- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
+- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
 
 ### Bugs
 
-* VM console doesn't work when using IPv6 in URL [#2530](https://github.com/vatesfr/xen-orchestra/issues/2530)
-* Retention issue with failed basic backup [#2524](https://github.com/vatesfr/xen-orchestra/issues/2524)
-* [VM/Advanced] Check that the autopower on setting is working [#2489](https://github.com/vatesfr/xen-orchestra/issues/2489)
-* Cloud config drive create fail on XenServer < 7 [#2478](https://github.com/vatesfr/xen-orchestra/issues/2478)
-* VM create fails due to missing vGPU id [#2466](https://github.com/vatesfr/xen-orchestra/issues/2466)
+- VM console doesn't work when using IPv6 in URL [#2530](https://github.com/vatesfr/xen-orchestra/issues/2530)
+- Retention issue with failed basic backup [#2524](https://github.com/vatesfr/xen-orchestra/issues/2524)
+- [VM/Advanced] Check that the autopower on setting is working [#2489](https://github.com/vatesfr/xen-orchestra/issues/2489)
+- Cloud config drive create fail on XenServer < 7 [#2478](https://github.com/vatesfr/xen-orchestra/issues/2478)
+- VM create fails due to missing vGPU id [#2466](https://github.com/vatesfr/xen-orchestra/issues/2466)
 
 ## **5.14.0** (2017-10-31)
 
 ### Enhancements
 
-* VM snapshot description display [#2458](https://github.com/vatesfr/xen-orchestra/issues/2458)
-* [Home] Ability to sort VM by number of snapshots [#2450](https://github.com/vatesfr/xen-orchestra/issues/2450)
-* Display XS version in host view [#2439](https://github.com/vatesfr/xen-orchestra/issues/2439)
-* [File restore]: Clarify the possibility to select multiple files [#2438](https://github.com/vatesfr/xen-orchestra/issues/2438)
-* [Continuous Replication] Time in replicated VMs [#2431](https://github.com/vatesfr/xen-orchestra/issues/2431)
-* [SortedTable] Active page in URL param [#2405](https://github.com/vatesfr/xen-orchestra/issues/2405)
-* replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
-* [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
-* Handle patching licenses [#2382](https://github.com/vatesfr/xen-orchestra/issues/2382)
-* Credential leaking in logs for messages regarding invalid credentials and "too fast authentication" [#2363](https://github.com/vatesfr/xen-orchestra/issues/2363)
-* [SortedTable] Keyboard support [#2330](https://github.com/vatesfr/xen-orchestra/issues/2330)
-* token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
-* On updater error, display link to documentation [#1610](https://github.com/vatesfr/xen-orchestra/issues/1610)
-* Add basic vGPU support [#2413](https://github.com/vatesfr/xen-orchestra/issues/2413)
-* Storage View - Disk Tab - real disk usage [#2475](https://github.com/vatesfr/xen-orchestra/issues/2475)
+- VM snapshot description display [#2458](https://github.com/vatesfr/xen-orchestra/issues/2458)
+- [Home] Ability to sort VM by number of snapshots [#2450](https://github.com/vatesfr/xen-orchestra/issues/2450)
+- Display XS version in host view [#2439](https://github.com/vatesfr/xen-orchestra/issues/2439)
+- [File restore]: Clarify the possibility to select multiple files [#2438](https://github.com/vatesfr/xen-orchestra/issues/2438)
+- [Continuous Replication] Time in replicated VMs [#2431](https://github.com/vatesfr/xen-orchestra/issues/2431)
+- [SortedTable] Active page in URL param [#2405](https://github.com/vatesfr/xen-orchestra/issues/2405)
+- replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
+- [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
+- Handle patching licenses [#2382](https://github.com/vatesfr/xen-orchestra/issues/2382)
+- Credential leaking in logs for messages regarding invalid credentials and "too fast authentication" [#2363](https://github.com/vatesfr/xen-orchestra/issues/2363)
+- [SortedTable] Keyboard support [#2330](https://github.com/vatesfr/xen-orchestra/issues/2330)
+- token.create should accept an expiration [#1769](https://github.com/vatesfr/xen-orchestra/issues/1769)
+- On updater error, display link to documentation [#1610](https://github.com/vatesfr/xen-orchestra/issues/1610)
+- Add basic vGPU support [#2413](https://github.com/vatesfr/xen-orchestra/issues/2413)
+- Storage View - Disk Tab - real disk usage [#2475](https://github.com/vatesfr/xen-orchestra/issues/2475)
 
 ### Bugs
 
-* Config drive - Custom config not working properly [#2449](https://github.com/vatesfr/xen-orchestra/issues/2449)
-* Snapshot sorted table breaks copyVm [#2446](https://github.com/vatesfr/xen-orchestra/issues/2446)
-* [vm/snapshots] Incorrect default sort order [#2442](https://github.com/vatesfr/xen-orchestra/issues/2442)
-* [Backups/Jobs] Incorrect months mapping [#2427](https://github.com/vatesfr/xen-orchestra/issues/2427)
-* [Xapi#barrier()] Not compatible with XenServer < 6.1 [#2418](https://github.com/vatesfr/xen-orchestra/issues/2418)
-* [SortedTable] Change page when no more items on the page [#2401](https://github.com/vatesfr/xen-orchestra/issues/2401)
-* Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
-* Unable to edit / save restored backup job [#1922](https://github.com/vatesfr/xen-orchestra/issues/1922)
+- Config drive - Custom config not working properly [#2449](https://github.com/vatesfr/xen-orchestra/issues/2449)
+- Snapshot sorted table breaks copyVm [#2446](https://github.com/vatesfr/xen-orchestra/issues/2446)
+- [vm/snapshots] Incorrect default sort order [#2442](https://github.com/vatesfr/xen-orchestra/issues/2442)
+- [Backups/Jobs] Incorrect months mapping [#2427](https://github.com/vatesfr/xen-orchestra/issues/2427)
+- [Xapi#barrier()] Not compatible with XenServer < 6.1 [#2418](https://github.com/vatesfr/xen-orchestra/issues/2418)
+- [SortedTable] Change page when no more items on the page [#2401](https://github.com/vatesfr/xen-orchestra/issues/2401)
+- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
+- Unable to edit / save restored backup job [#1922](https://github.com/vatesfr/xen-orchestra/issues/1922)
 
 ## **5.13.0** (2017-09-29)
 
 ### Enhancements
 
-* replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
-* [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
-* Auto select iqn or lun if there is only one [#2379](https://github.com/vatesfr/xen-orchestra/issues/2379)
-* [Sparklines] Hide points [#2370](https://github.com/vatesfr/xen-orchestra/issues/2370)
-* Allow xo-server-recover-account to generate a random password [#2360](https://github.com/vatesfr/xen-orchestra/issues/2360)
-* Add disk in existing VM as self user [#2348](https://github.com/vatesfr/xen-orchestra/issues/2348)
-* Sorted table for Settings/server [#2340](https://github.com/vatesfr/xen-orchestra/issues/2340)
-* Sign in should be case insensitive [#2337](https://github.com/vatesfr/xen-orchestra/issues/2337)
-* [SortedTable] Extend checkbox click to whole column [#2329](https://github.com/vatesfr/xen-orchestra/issues/2329)
-* [SortedTable] Ability to select all items (across pages) [#2324](https://github.com/vatesfr/xen-orchestra/issues/2324)
-* [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
-* Warning on SMB remote creation [#2316](https://github.com/vatesfr/xen-orchestra/issues/2316)
-* [Home | SortedTable] Add link to syntax doc in the filter input [#2305](https://github.com/vatesfr/xen-orchestra/issues/2305)
-* [SortedTable] Add optional binding of filter to an URL query [#2301](https://github.com/vatesfr/xen-orchestra/issues/2301)
-* [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
-* SR view / Disks: option to display non managed VDIs [#1724](https://github.com/vatesfr/xen-orchestra/issues/1724)
-* Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
+- replace all '...' with the UTF-8 equivalent [#2391](https://github.com/vatesfr/xen-orchestra/issues/2391)
+- [SortedTable] Explicit when no items [#2388](https://github.com/vatesfr/xen-orchestra/issues/2388)
+- Auto select iqn or lun if there is only one [#2379](https://github.com/vatesfr/xen-orchestra/issues/2379)
+- [Sparklines] Hide points [#2370](https://github.com/vatesfr/xen-orchestra/issues/2370)
+- Allow xo-server-recover-account to generate a random password [#2360](https://github.com/vatesfr/xen-orchestra/issues/2360)
+- Add disk in existing VM as self user [#2348](https://github.com/vatesfr/xen-orchestra/issues/2348)
+- Sorted table for Settings/server [#2340](https://github.com/vatesfr/xen-orchestra/issues/2340)
+- Sign in should be case insensitive [#2337](https://github.com/vatesfr/xen-orchestra/issues/2337)
+- [SortedTable] Extend checkbox click to whole column [#2329](https://github.com/vatesfr/xen-orchestra/issues/2329)
+- [SortedTable] Ability to select all items (across pages) [#2324](https://github.com/vatesfr/xen-orchestra/issues/2324)
+- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
+- Warning on SMB remote creation [#2316](https://github.com/vatesfr/xen-orchestra/issues/2316)
+- [Home | SortedTable] Add link to syntax doc in the filter input [#2305](https://github.com/vatesfr/xen-orchestra/issues/2305)
+- [SortedTable] Add optional binding of filter to an URL query [#2301](https://github.com/vatesfr/xen-orchestra/issues/2301)
+- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
+- SR view / Disks: option to display non managed VDIs [#1724](https://github.com/vatesfr/xen-orchestra/issues/1724)
+- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
 
 ### Bugs
 
-* iSCSI issue on LUN selector [#2374](https://github.com/vatesfr/xen-orchestra/issues/2374)
-* Errors in VM copy are not properly reported [#2347](https://github.com/vatesfr/xen-orchestra/issues/2347)
-* Removing a PIF IP fails [#2346](https://github.com/vatesfr/xen-orchestra/issues/2346)
-* Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
-* iSCSI LUN Detection fails with authentification [#2339](https://github.com/vatesfr/xen-orchestra/issues/2339)
-* Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
-* [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
-* A job shouldn't executable more than once at the same time [#2053](https://github.com/vatesfr/xen-orchestra/issues/2053)
+- iSCSI issue on LUN selector [#2374](https://github.com/vatesfr/xen-orchestra/issues/2374)
+- Errors in VM copy are not properly reported [#2347](https://github.com/vatesfr/xen-orchestra/issues/2347)
+- Removing a PIF IP fails [#2346](https://github.com/vatesfr/xen-orchestra/issues/2346)
+- Review and fix creating a VM from a snapshot [#2343](https://github.com/vatesfr/xen-orchestra/issues/2343)
+- iSCSI LUN Detection fails with authentification [#2339](https://github.com/vatesfr/xen-orchestra/issues/2339)
+- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
+- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
+- A job shouldn't executable more than once at the same time [#2053](https://github.com/vatesfr/xen-orchestra/issues/2053)
 
 ## **5.12.0** (2017-08-31)
 
 ### Enhancements
 
-* PIF selector with physical status [#2326](https://github.com/vatesfr/xen-orchestra/issues/2326)
-* [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
-* Self service filter for home/VM view [#2303](https://github.com/vatesfr/xen-orchestra/issues/2303)
-* SR/Disks Display total of VDIs to coalesce [#2300](https://github.com/vatesfr/xen-orchestra/issues/2300)
-* Pool filter in the task view [#2293](https://github.com/vatesfr/xen-orchestra/issues/2293)
-* "Loading" while fetching objects [#2285](https://github.com/vatesfr/xen-orchestra/issues/2285)
-* [SortedTable] Add grouped actions feature [#2276](https://github.com/vatesfr/xen-orchestra/issues/2276)
-* Add a filter to the backups' log [#2246](https://github.com/vatesfr/xen-orchestra/issues/2246)
-* It should not be possible to migrate a halted VM. [#2233](https://github.com/vatesfr/xen-orchestra/issues/2233)
-* [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
-* Allow to set pool master [#2213](https://github.com/vatesfr/xen-orchestra/issues/2213)
-* Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
+- PIF selector with physical status [#2326](https://github.com/vatesfr/xen-orchestra/issues/2326)
+- [SortedTable] Range selection [#2323](https://github.com/vatesfr/xen-orchestra/issues/2323)
+- Self service filter for home/VM view [#2303](https://github.com/vatesfr/xen-orchestra/issues/2303)
+- SR/Disks Display total of VDIs to coalesce [#2300](https://github.com/vatesfr/xen-orchestra/issues/2300)
+- Pool filter in the task view [#2293](https://github.com/vatesfr/xen-orchestra/issues/2293)
+- "Loading" while fetching objects [#2285](https://github.com/vatesfr/xen-orchestra/issues/2285)
+- [SortedTable] Add grouped actions feature [#2276](https://github.com/vatesfr/xen-orchestra/issues/2276)
+- Add a filter to the backups' log [#2246](https://github.com/vatesfr/xen-orchestra/issues/2246)
+- It should not be possible to migrate a halted VM. [#2233](https://github.com/vatesfr/xen-orchestra/issues/2233)
+- [Home][Keyboard navigation] Allow selecting the objects [#2214](https://github.com/vatesfr/xen-orchestra/issues/2214)
+- Allow to set pool master [#2213](https://github.com/vatesfr/xen-orchestra/issues/2213)
+- Continuous Replication Retention [#1692](https://github.com/vatesfr/xen-orchestra/issues/1692)
 
 ### Bugs
 
-* Home pagination bug [#2310](https://github.com/vatesfr/xen-orchestra/issues/2310)
-* Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
-* VM snapshots are not correctly deleted [#2304](https://github.com/vatesfr/xen-orchestra/issues/2304)
-* Parallel deletion of VMs fails [#2297](https://github.com/vatesfr/xen-orchestra/issues/2297)
-* Continous replication create multiple zombie disks [#2292](https://github.com/vatesfr/xen-orchestra/issues/2292)
-* Add user to Group issue [#2196](https://github.com/vatesfr/xen-orchestra/issues/2196)
-* [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
+- Home pagination bug [#2310](https://github.com/vatesfr/xen-orchestra/issues/2310)
+- Fix PoolActionBar to add a new SR [#2307](https://github.com/vatesfr/xen-orchestra/issues/2307)
+- VM snapshots are not correctly deleted [#2304](https://github.com/vatesfr/xen-orchestra/issues/2304)
+- Parallel deletion of VMs fails [#2297](https://github.com/vatesfr/xen-orchestra/issues/2297)
+- Continous replication create multiple zombie disks [#2292](https://github.com/vatesfr/xen-orchestra/issues/2292)
+- Add user to Group issue [#2196](https://github.com/vatesfr/xen-orchestra/issues/2196)
+- [VM migration] Error if default SR not accessible to target host [#2180](https://github.com/vatesfr/xen-orchestra/issues/2180)
 
 ## **5.11.0** (2017-07-31)
```
**CODE_OF_CONDUCT.md** (new file, 46 lines)

```md
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at julien.fontanet@vates.fr. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
```
**flow-typed/lodash.js** (vendored, new file, 16 lines)

```js
declare module 'lodash' {
  declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
  declare export function isEmpty(mixed): boolean
  declare export function keyBy<T>(array: T[], iteratee: string): boolean
  declare export function last<T>(array?: T[]): T | void
  declare export function map<T1, T2>(
    collection: T1[],
    iteratee: (T1) => T2
  ): T2[]
  declare export function mapValues<K, V1, V2>(
    object: { [K]: V1 },
    iteratee: (V1, K) => V2
  ): { [K]: V2 }
  declare export function noop(...args: mixed[]): void
  declare export function values<K, V>(object: { [K]: V }): V[]
}
```
**flow-typed/promise-toolbox.js** (vendored, new file, 11 lines)

```js
declare module 'promise-toolbox' {
  declare export function cancelable(Function): Function
  declare export function defer<T>(): {|
    promise: Promise<T>,
    reject: T => void,
    resolve: T => void,
  |}
  declare export function fromEvent(emitter: mixed, string): Promise<mixed>
  declare export function ignoreErrors(): Promise<void>
  declare export function timeout<T>(delay: number): Promise<T>
}
```
**flow-typed/xo.js** (vendored, new file, 2 lines)

```js
// eslint-disable-next-line no-undef
declare type $Dict<T, K = string> = { [K]: T }
```
**package.json** (12 lines changed)

```diff
@@ -1,19 +1,19 @@
 {
   "devDependencies": {
-    "@babel/register": "^7.0.0-beta.40",
+    "@babel/register": "^7.0.0-beta.44",
     "babel-7-jest": "^21.3.2",
     "babel-eslint": "^8.1.2",
     "benchmark": "^2.1.4",
     "eslint": "^4.14.0",
     "eslint-config-standard": "^11.0.0-beta.0",
-    "eslint-config-standard-jsx": "^4.0.2",
+    "eslint-config-standard-jsx": "^5.0.0",
     "eslint-plugin-import": "^2.8.0",
     "eslint-plugin-node": "^6.0.0",
     "eslint-plugin-promise": "^3.6.0",
     "eslint-plugin-react": "^7.6.1",
     "eslint-plugin-standard": "^3.0.1",
     "exec-promise": "^0.7.0",
-    "flow-bin": "^0.66.0",
+    "flow-bin": "^0.69.0",
     "globby": "^8.0.0",
     "husky": "^0.14.3",
     "jest": "^22.0.4",
@@ -43,6 +43,7 @@
       "/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
       "/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
+      "/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
       "/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
       "\\.jsx?$": "babel-jest"
     }
   },
@@ -51,12 +52,13 @@
     "build": "scripts/run-script --parallel build",
     "clean": "scripts/run-script --parallel clean",
     "dev": "scripts/run-script --parallel dev",
-    "dev-test": "jest --bail --watch",
+    "dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",
     "posttest": "scripts/run-script test",
     "precommit": "scripts/lint-staged",
     "prepare": "scripts/run-script prepare",
     "pretest": "eslint --ignore-path .gitignore .",
-    "test": "jest && flow status"
+    "test": "jest \"^(?!.*\\.integ\\.spec\\.js$)\"",
+    "test-integration": "jest \".integ\\.spec\\.js$\""
   },
   "workspaces": [
     "@xen-orchestra/*",
```
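The new `test` / `test-integration` split relies on jest's regex filtering of test paths: the anchored negative lookahead `^(?!.*\.integ\.spec\.js$)` keeps every test file except integration specs, which `\.integ\.spec\.js$` selects instead. A quick sketch of how the two patterns partition file names — the sample paths are made up for illustration:

```js
// Patterns taken from the scripts above.
const unit = /^(?!.*\.integ\.spec\.js$)/
const integration = /\.integ\.spec\.js$/

// Hypothetical test paths, for illustration only.
const files = [
  'packages/xo-server/src/api/vm.spec.js',
  'packages/xo-server/src/xapi/index.integ.spec.js',
]

console.log(files.filter(f => unit.test(f))) // → the plain .spec.js file
console.log(files.filter(f => integration.test(f))) // → the .integ.spec.js file
```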
```diff
@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-          let node = (pkg.engines || {}).node
-          if (node !== undefined) {
-            const trimChars = '^=>~'
-            while (trimChars.includes(node[0])) {
-              node = node.slice(1)
-            }
-            return { node: node }
-          }
-        })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "complex-matcher",
-  "version": "0.2.1",
+  "version": "0.3.0",
   "license": "ISC",
   "description": "",
   "keywords": [],
@@ -30,9 +30,9 @@
     "lodash": "^4.17.4"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.1",
     "rimraf": "^2.6.2"
```
```diff
@@ -70,6 +70,29 @@ export class And extends Node {
   }
 }
 
+export class Comparison extends Node {
+  constructor (operator, value) {
+    super()
+    this._comparator = Comparison.comparators[operator]
+    this._operator = operator
+    this._value = value
+  }
+
+  match (value) {
+    return typeof value === 'number' && this._comparator(value, this._value)
+  }
+
+  toString () {
+    return this._operator + String(this._value)
+  }
+}
+Comparison.comparators = {
+  '>': (a, b) => a > b,
+  '>=': (a, b) => a >= b,
+  '<': (a, b) => a < b,
+  '<=': (a, b) => a <= b,
+}
+
 export class Or extends Node {
   constructor (children) {
     super()
@@ -408,6 +431,13 @@ const parser = P.grammar({
       P.text(')')
     ).map(_ => new Or(_[4])),
     P.seq(P.text('!'), r.ws, r.term).map(_ => new Not(_[2])),
+    P.seq(P.regex(/[<>]=?/), r.rawString).map(([op, val]) => {
+      val = +val
+      if (Number.isNaN(val)) {
+        throw new TypeError('value must be a number')
+      }
+      return new Comparison(op, val)
+    }),
     P.seq(r.string, r.ws, P.text(':'), r.ws, r.term).map(
       _ => new Property(_[0], _[4])
     ),
```
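The new `Comparison` node gives the search grammar numeric operators (`>`, `>=`, `<`, `<=`); the grammar rule coerces the right-hand side to a number and the node only ever matches numeric values. A hedged sketch of the resulting behavior — it assumes the package exposes a `parse` function returning nodes with `match`, and the `snapshots` property is an invented example:

```js
import { parse } from 'complex-matcher' // assumed export

// 'snapshots' is a made-up property name; '>=2' exercises the new Comparison node.
const predicate = parse('snapshots:>=2')

console.log(predicate.match({ snapshots: 3 })) // true
console.log(predicate.match({ snapshots: 1 })) // false
console.log(predicate.match({ snapshots: 'n/a' })) // false: non-numbers never match
```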
```diff
@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-          let node = (pkg.engines || {}).node
-          if (node !== undefined) {
-            const trimChars = '^=>~'
-            while (trimChars.includes(node[0])) {
-              node = node.slice(1)
-            }
-            return { node: node }
-          }
-        })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)
```
```diff
@@ -28,10 +28,10 @@
   },
   "dependencies": {},
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"
   },
```
```diff
@@ -26,7 +26,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@nraynaud/struct-fu": "^1.0.1",
+    "struct-fu": "^1.2.0",
     "@nraynaud/xo-fs": "^0.0.5",
     "babel-runtime": "^6.22.0",
     "exec-promise": "^0.7.0"
```
```diff
@@ -1,5 +1,5 @@
 import assert from 'assert'
-import fu from '@nraynaud/struct-fu'
+import fu from 'struct-fu'
 import { dirname } from 'path'
 
 // ===================================================================
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "xen-api",
-  "version": "0.16.5",
+  "version": "0.16.9",
   "license": "ISC",
   "description": "Connector to the Xen API",
   "keywords": [
```
@@ -143,7 +143,9 @@ export const isOpaqueRef = value =>
|
||||
|
||||
const RE_READ_ONLY_METHOD = /^[^.]+\.get_/
|
||||
const isReadOnlyCall = (method, args) =>
|
||||
args.length === 1 && isOpaqueRef(args[0]) && RE_READ_ONLY_METHOD.test(method)
|
||||
args.length === 1 &&
|
||||
typeof args[0] === 'string' &&
|
||||
RE_READ_ONLY_METHOD.test(method)
|
||||
|
||||
// Prepare values before passing them to the XenAPI:
|
||||
//
|
||||
@@ -180,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])

 // -------------------------------------------------------------------

-const getTaskResult = (task, onSuccess, onFailure) => {
+const getTaskResult = task => {
   const { status } = task
   if (status === 'cancelled') {
-    return [onFailure(new Cancel('task canceled'))]
+    return Promise.reject(new Cancel('task canceled'))
   }
   if (status === 'failure') {
-    return [onFailure(wrapError(task.error_info))]
+    return Promise.reject(wrapError(task.error_info))
   }
   if (status === 'success') {
     // the result might be:
     // - empty string
     // - an opaque reference
     // - an XML-RPC value
-    return [onSuccess(task.result)]
+    return Promise.resolve(task.result)
   }
 }

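With this change `getTaskResult` returns a promise directly (or `undefined` while the task is still pending) instead of invoking success/failure callbacks and wrapping the outcome in an array. A minimal sketch of a caller under the new contract (the task literal is illustrative, not from this diff):

```js
// Settled task: a promise is returned; pending task: undefined.
const maybeResult = getTaskResult({ status: 'success', result: 'OpaqueRef:abc' })
if (maybeResult !== undefined) {
  maybeResult.then(result => console.log('task result:', result))
}
```
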
@@ -244,7 +246,7 @@ export class Xapi extends EventEmitter {
     objects.getKey = getKey

     this._objectsByRefs = createObject(null)
-    this._objectsByRefs['OpaqueRef:NULL'] = null
+    this._objectsByRefs['OpaqueRef:NULL'] = undefined

     this._taskWatchers = Object.create(null)

@@ -407,22 +409,22 @@ export class Xapi extends EventEmitter {
     return this._readOnly && !isReadOnlyCall(method, args)
       ? Promise.reject(new Error(`cannot call ${method}() in read only mode`))
       : this._sessionCall(`Async.${method}`, args).then(taskRef => {
-        $cancelToken.promise.then(() => {
-          this._sessionCall('task.cancel', [taskRef]).catch(noop)
-        })
+          $cancelToken.promise.then(() => {
+            // TODO: do not trigger if the task is already over
+            this._sessionCall('task.cancel', [taskRef]).catch(noop)
+          })

-        return this.watchTask(taskRef)::lastly(() => {
-          this._sessionCall('task.destroy', [taskRef]).catch(noop)
+          return this.watchTask(taskRef)::lastly(() => {
+            this._sessionCall('task.destroy', [taskRef]).catch(noop)
+          })
         })
-      })
   }

   // create a task and automatically destroy it when settled
   //
   // allowed even in read-only mode because it does not have impact on the
   // XenServer and it's necessary for getResource()
   createTask (nameLabel, nameDescription = '') {
     if (this._readOnly) {
       return Promise.reject(new Error('cannot create task in read only mode'))
     }

     const promise = this._sessionCall('task.create', [
       nameLabel,
       nameDescription,

@@ -441,16 +443,18 @@ export class Xapi extends EventEmitter {
   // this lib), UUID (unique identifier that some objects have) or
   // opaque reference (internal to XAPI).
   getObject (idOrUuidOrRef, defaultValue) {
-    const object =
-      typeof idOrUuidOrRef === 'string'
-        ? this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
-        : this._objects.all[idOrUuidOrRef.$id]
+    if (typeof idOrUuidOrRef === 'object') {
+      idOrUuidOrRef = idOrUuidOrRef.$id
+    }

-    if (object) return object
+    const object =
+      this._objects.all[idOrUuidOrRef] || this._objectsByRefs[idOrUuidOrRef]
+
+    if (object !== undefined) return object

     if (arguments.length > 1) return defaultValue

-    throw new Error('there is not object can be matched to ' + idOrUuidOrRef)
+    throw new Error('no object with UUID or opaque ref: ' + idOrUuidOrRef)
   }

   // Returns the object for a given opaque reference (internal to

@@ -458,11 +462,11 @@
   getObjectByRef (ref, defaultValue) {
     const object = this._objectsByRefs[ref]

-    if (object) return object
+    if (object !== undefined) return object

     if (arguments.length > 1) return defaultValue

-    throw new Error('there is no object with the ref ' + ref)
+    throw new Error('no object with opaque ref: ' + ref)
   }

   // Returns the object for a given UUID (unique identifier that some

@@ -475,7 +479,7 @@

     if (arguments.length > 1) return defaultValue

-    throw new Error('there is no object with the UUID ' + uuid)
+    throw new Error('no object with UUID: ' + uuid)
   }

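Note the lookup methods now treat only `undefined` as missing (so present-but-falsy entries are still returned) and fall back to `defaultValue` only when it was explicitly passed. A minimal usage sketch (the UUID is a placeholder):

```js
// With a default: never throws, returns the fallback when unknown.
const vm = xapi.getObject('00000000-0000-0000-0000-000000000000', null)

// Without a default: throws 'no object with UUID or opaque ref: ...'.
const vm2 = xapi.getObject(vmUuid)
```
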
   getRecord (type, ref) {

@@ -575,31 +579,31 @@ export class Xapi extends EventEmitter {
     // redirection before consuming body
     const promise = isStream
       ? doRequest({
-        body: '',
-
-        // omit task_id because this request will fail on purpose
-        query: 'task_id' in query ? omit(query, 'task_id') : query,
-
-        maxRedirects: 0,
-      }).then(
-        response => {
-          response.req.abort()
-          return doRequest()
-        },
-        error => {
-          let response
-          if (error != null && (response = error.response) != null) {
-            response.req.abort()
-
-            const { headers: { location }, statusCode } = response
-            if (statusCode === 302 && location !== undefined) {
-              return doRequest(location)
-            }
-          }
-
-          throw error
-        }
-      )
+          body: '',
+
+          // omit task_id because this request will fail on purpose
+          query: 'task_id' in query ? omit(query, 'task_id') : query,
+
+          maxRedirects: 0,
+        }).then(
+          response => {
+            response.req.abort()
+            return doRequest()
+          },
+          error => {
+            let response
+            if (error != null && (response = error.response) != null) {
+              response.req.abort()
+
+              const { headers: { location }, statusCode } = response
+              if (statusCode === 302 && location !== undefined) {
+                return doRequest(location)
+              }
+            }
+
+            throw error
+          }
+        )
       : doRequest()

     return promise.then(response => {

@@ -638,11 +642,11 @@ export class Xapi extends EventEmitter {
     let watcher = watchers[ref]
     if (watcher === undefined) {
       // sync check if the task is already settled
-      const task = this.objects.all[ref]
+      const task = this._objectsByRefs[ref]
       if (task !== undefined) {
-        const result = getTaskResult(task, Promise.resolve, Promise.reject)
-        if (result) {
-          return result[0]
+        const result = getTaskResult(task)
+        if (result !== undefined) {
+          return result
         }
       }

@@ -789,11 +793,12 @@ export class Xapi extends EventEmitter {

     const taskWatchers = this._taskWatchers
     const taskWatcher = taskWatchers[ref]
-    if (
-      taskWatcher !== undefined &&
-      getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
-    ) {
-      delete taskWatchers[ref]
+    if (taskWatcher !== undefined) {
+      const result = getTaskResult(object)
+      if (result !== undefined) {
+        taskWatcher.resolve(result)
+        delete taskWatchers[ref]
+      }
     }
   }
 }

@@ -813,7 +818,10 @@ export class Xapi extends EventEmitter {
     const taskWatchers = this._taskWatchers
     const taskWatcher = taskWatchers[ref]
     if (taskWatcher !== undefined) {
-      taskWatcher.reject(new Error('task has been destroyed before completion'))
+      const error = new Error('task has been destroyed before completion')
+      error.task = object
+      error.taskRef = ref
+      taskWatcher.reject(error)
       delete taskWatchers[ref]
     }
   }

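The rejection for a destroyed task now carries the task snapshot and its opaque ref, so a caller can tell which task disappeared. A sketch of consuming the enriched error (hypothetical caller, not from this diff):

```js
xapi.watchTask(taskRef).catch(error => {
  if (error.taskRef !== undefined) {
    // the task record was destroyed before it settled
    console.error('task destroyed:', error.taskRef, error.task)
  }
  throw error
})
```
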
@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-        let node = (pkg.engines || {}).node
-        if (node !== undefined) {
-          const trimChars = '^=>~'
-          while (trimChars.includes(node[0])) {
-            node = node.slice(1)
-          }
-          return { node: node }
-        }
-      })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)

@@ -105,6 +105,12 @@ encoding by prefixing with `json:`:
 > xo-cli foo.bar baz='json:[1, 2, 3]'
 ```

+##### Configuration export
+
+```
+> xo-cli xo.exportConfig @=config.json
+```
+
 ##### VM export

 ```

@@ -28,7 +28,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@babel/polyfill": "7.0.0-beta.40",
+    "@babel/polyfill": "7.0.0-beta.44",
     "bluebird": "^3.5.1",
     "chalk": "^2.2.0",
     "event-to-promise": "^0.8.0",

@@ -49,10 +49,10 @@
     "xo-lib": "^0.9.0"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"

@@ -62,7 +62,6 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build",
-    "pretest": "flow status"
+    "prepublishOnly": "yarn run build"
   }
 }

@@ -328,6 +328,15 @@ async function listObjects (args) {
 }
 exports.listObjects = listObjects

+function ensurePathParam (method, value) {
+  if (typeof value !== 'string') {
+    const error =
+      method +
+      ' requires the @ parameter to be a path (e.g. @=/tmp/config.json)'
+    throw error
+  }
+}
+
 async function call (args) {
   if (!args.length) {
     throw new Error('missing command name')

@@ -350,6 +359,7 @@ async function call (args) {
       key = keys[0]

       if (key === '$getFrom') {
+        ensurePathParam(method, file)
         url = resolveUrl(baseUrl, result[key])
         const output = createWriteStream(file)

@@ -371,6 +381,7 @@ async function call (args) {
       }

       if (key === '$sendTo') {
+        ensurePathParam(method, file)
         url = resolveUrl(baseUrl, result[key])

         const stats = await stat(file)

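`ensurePathParam` turns a missing `@` argument into an explicit message before any download or upload is attempted; note that it throws a plain string rather than an `Error` instance. A minimal sketch of the failure mode (arguments are illustrative):

```js
// `file` comes from the `@=...` CLI flag; when absent it is undefined.
ensurePathParam('xo.exportConfig', undefined)
// -> throws 'xo.exportConfig requires the @ parameter to be a path (e.g. @=/tmp/config.json)'
```
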
@@ -40,7 +40,7 @@
     "dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
     "prebuild": "rimraf dist/",
     "predev": "yarn run prebuild",
-    "prepublishOnly": "yarn run build"
+    "prepare": "yarn run build"
   },
   "babel": {
     "plugins": [

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-auth-saml",
-  "version": "0.5.0",
+  "version": "0.5.1",
   "license": "AGPL-3.0",
   "description": "SAML authentication plugin for XO-Server",
   "keywords": [

@@ -3,22 +3,32 @@ import { Strategy } from 'passport-saml'
 // ===================================================================

 export const configurationSchema = {
+  description:
+    'Important: When registering your instance to your identity provider, you must configure its callback URL to `https://<xo.company.net>/signin/saml/callback`!',
   type: 'object',
   properties: {
     cert: {
+      title: 'Certificate',
       description: "Copy/paste the identity provider's certificate",
       type: 'string',
     },
     entryPoint: {
+      title: 'Entry point',
       description: 'Entry point of the identity provider',
       type: 'string',
     },
     issuer: {
+      title: 'Issuer',
       description: 'Issuer string to supply to the identity provider',
       type: 'string',
     },
+    usernameField: {
+      title: 'Username field',
+      description: 'Field to use as the XO username',
+      type: 'string',
+    },
   },
-  required: ['cert', 'entryPoint', 'issuer'],
+  required: ['cert', 'entryPoint', 'issuer', 'usernameField'],
 }

 // ===================================================================

@@ -42,6 +52,7 @@ class AuthSamlXoPlugin {
     new Strategy(this._conf, async (profile, done) => {
       const name = profile[this._usernameField]
       if (!name) {
+        console.warn('xo-server-auth-saml:', profile)
         done('no name found for this user')
         return
       }

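With `usernameField` now required, a complete plugin configuration must name the SAML attribute to map to the XO username. A hypothetical example (all values are placeholders, not from this diff):

```js
// Example xo-server-auth-saml configuration (illustrative values only)
const configuration = {
  cert: '-----BEGIN CERTIFICATE----- ... -----END CERTIFICATE-----',
  entryPoint: 'https://idp.example.net/sso/saml',
  issuer: 'xen-orchestra',
  usernameField: 'uid',
}
```
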
@@ -128,10 +128,15 @@ class XoServerCloud {
       throw new Error(`cannot get resource: ${namespace} not registered`)
     }

-    const namespaceCatalog = await this._getNamespaceCatalog(namespace)
+    const { _token: token } = await this._getNamespaceCatalog(namespace)
+
+    // 2018-03-20 Extra check: getResourceDownloadToken seems to be called without a token in some cases
+    if (token === undefined) {
+      throw new Error(`${namespace} namespace token is undefined`)
+    }

     const downloadToken = await this._updater.call('getResourceDownloadToken', {
-      token: namespaceCatalog._token,
+      token,
       id,
       version,
     })

@@ -30,7 +30,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.0",
+    "@xen-orchestra/cron": "^1.0.3",
     "babel-runtime": "^6.11.6",
     "lodash": "^4.16.2"
   },

@@ -1,47 +1,3 @@
-'use strict'
-
-const NODE_ENV = process.env.NODE_ENV || 'development'
-const __PROD__ = NODE_ENV === 'production'
-const __TEST__ = NODE_ENV === 'test'
-
-const pkg = require('./package')
-
-const plugins = {
-  lodash: {},
-}
-
-const presets = {
-  '@babel/preset-env': {
-    debug: !__TEST__,
-    loose: true,
-    shippedProposals: true,
-    targets: __PROD__
-      ? (() => {
-        let node = (pkg.engines || {}).node
-        if (node !== undefined) {
-          const trimChars = '^=>~'
-          while (trimChars.includes(node[0])) {
-            node = node.slice(1)
-          }
-          return { node: node }
-        }
-      })()
-      : { browsers: '', node: 'current' },
-    useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
-  },
-}
-
-Object.keys(pkg.devDependencies || {}).forEach(name => {
-  if (!(name in presets) && /@babel\/plugin-.+/.test(name)) {
-    plugins[name] = {}
-  } else if (!(name in presets) && /@babel\/preset-.+/.test(name)) {
-    presets[name] = {}
-  }
-})
-
-module.exports = {
-  comments: !__PROD__,
-  ignore: __TEST__ ? undefined : [/\.spec\.js$/],
-  plugins: Object.keys(plugins).map(plugin => [plugin, plugins[plugin]]),
-  presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
-}
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)

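Three packages in this range replace their local `.babelrc.js` with the shared `@xen-orchestra/babel-config` factory. That module is not shown in this diff; presumably it exports a function from `package.json` to a Babel config, along the lines of the per-package files it replaces. A minimal sketch of the assumed shape (illustrative, not the actual implementation):

```js
// @xen-orchestra/babel-config, assumed shape for illustration only
module.exports = function buildBabelConfig (pkg) {
  const NODE_ENV = process.env.NODE_ENV || 'development'
  const __PROD__ = NODE_ENV === 'production'

  return {
    comments: !__PROD__,
    presets: [
      ['@babel/preset-env', { targets: { node: (pkg.engines || {}).node } }],
    ],
  }
}
```
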
@@ -20,16 +20,16 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.0",
+    "@xen-orchestra/cron": "^1.0.3",
     "d3-time-format": "^2.1.1",
-    "json5": "^0.5.1",
+    "json5": "^1.0.0",
     "lodash": "^4.17.4"
   },
   "devDependencies": {
-    "@babel/cli": "7.0.0-beta.40",
-    "@babel/core": "7.0.0-beta.40",
-    "@babel/preset-env": "7.0.0-beta.40",
-    "@babel/preset-flow": "^7.0.0-beta.40",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "^7.0.0-beta.44",
     "babel-plugin-lodash": "^3.3.2",
     "cross-env": "^5.1.3",
     "rimraf": "^2.6.2"

@@ -58,7 +58,8 @@ export const configurationSchema = {
     },
     port: {
       type: 'integer',
-      description: 'port of the SMTP server (defaults to 25 or 465 for TLS)',
+      description:
+        'port of the SMTP server (defaults to 25 or 465 for TLS)',
     },
     secure: {
       default: false,

@@ -1,6 +1,6 @@
 {
   "name": "xo-server-usage-report",
-  "version": "0.3.2",
+  "version": "0.4.0",
   "license": "AGPL-3.0",
   "description": "",
   "keywords": [

@@ -34,7 +34,7 @@
     "node": ">=4"
   },
   "dependencies": {
-    "@xen-orchestra/cron": "^1.0.0",
+    "@xen-orchestra/cron": "^1.0.3",
     "babel-runtime": "^6.23.0",
     "handlebars": "^4.0.6",
     "html-minifier": "^3.5.8",

@@ -90,7 +90,7 @@
     .top table{
       margin: auto;
       margin-top: 20px;
-      width: 400px;
+      min-width: 30em;
     }

     .top table caption {

@@ -121,6 +121,10 @@
       border:1px solid #95a5a6;
       text-align: center;
     }
+
+    .allResources table {
+      min-width: 60em
+    }
   </style>
 </head>
 <body>

@@ -151,86 +155,34 @@
       </tr>
       <tr>
         <td>Number:</td>
-        <td>{{global.vms.number}}</td>
-        <td>
-          {{#if global.vmsEvolution.number}}
-            {{#compare global.vmsEvolution.number ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.number}}
-          {{else}}
-            0
-          {{/if}}
-        </td>
+        <td>{{global.vms.number}} {{normaliseEvolution global.vmsEvolution.number}}</td>
       </tr>
       <tr>
         <td>CPU:</td>
-        <td>{{global.vms.cpu}} %</td> <!-- One condition doesn't work -->
-        <td style='color:{{#compare global.vmsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.cpu}}
-            {{#compare global.vmsEvolution.cpu ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.cpu}}%
-          {{else}}
-            0
-          {{/if}}
-        </td>
+        <td>{{global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
       <tr>
       <tr>
         <td>RAM:</td>
-        <td>{{global.vms.ram}} GiB</td>
-        <td style='color:{{#compare global.vmsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.ram}}
-            {{#compare global.vmsEvolution.ram ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.ram}}%
-          {{else}}
-            0
-          {{/if}}
-        </td>
+        <td>{{global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
       <tr>
       <tr>
         <td>Disk read:</td>
-        <td>{{global.vms.diskRead}} MiB</td>
-        <td style='color:{{#compare global.vmsEvolution.diskRead ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.diskRead}}
-            {{#compare global.vmsEvolution.diskRead ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.diskRead}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
         </td>
       <tr>
       <tr>
         <td>Disk write:</td>
-        <td>{{global.vms.diskWrite}} MiB</td>
-        <td style='color:{{#compare global.vmsEvolution.diskWrite ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.diskWrite}}
-            {{#compare global.vmsEvolution.diskWrite ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.diskWrite}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
         </td>
       <tr>
       <tr>
-        <td>Net reception:</td>
-        <td>{{global.vms.netReception}} KiB</td>
-        <td style='color:{{#compare global.vmsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.netReception}}
-            {{#compare global.vmsEvolution.netReception ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.netReception}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>Network RX:</td>
+        <td>{{global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
         </td>
       <tr>
       <tr>
-        <td>Net transmission:</td>
-        <td>{{global.vms.netTransmission}} KiB</td>
-        <td style='color:{{#compare global.vmsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.vmsEvolution.netTransmission}}
-            {{#compare global.vmsEvolution.netTransmission ">" 0}}+{{/compare}}
-            {{global.vmsEvolution.netTransmission}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>Network TX:</td>
+        <td>{{global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
         </td>
       <tr>
     </table>

@@ -288,7 +240,7 @@
       </tr>
       {{/each}}
       <tr>
-        <td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
+        <td rowspan='{{math topVms.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
       </tr>
       {{#each topVms.netReception}}
       <tr>

@@ -298,7 +250,7 @@
       </tr>
       {{/each}}
       <tr>
-        <td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
+        <td rowspan='{{math topVms.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
       </tr>
       {{#each topVms.netTransmission}}
       <tr>

@@ -318,75 +270,33 @@
       </tr>
       <tr>
         <td>Number:</td>
-        <td>{{global.hosts.number}}</td>
-        <td>
-          {{#if global.hostsEvolution.number}}
-            {{#compare global.hostsEvolution.number ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.number}}
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.hosts.number}} {{normaliseEvolution global.hostsEvolution.number}}
         </td>
       </tr>
       <tr>
         <td>CPU:</td>
-        <td>{{global.hosts.cpu}} %</td>
-        <td style='color:{{#compare global.hostsEvolution.cpu ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.hostsEvolution.cpu}}
-            {{#compare global.hostsEvolution.cpu ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.cpu}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
         </td>
       <tr>
       <tr>
         <td>RAM:</td>
-        <td>{{global.hosts.ram}} GiB</td>
-        <td style='color:{{#compare global.hostsEvolution.ram ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.hostsEvolution.ram}}
-            {{#compare global.hostsEvolution.ram ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.ram}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
         </td>
-        </td>
       <tr>
       <tr>
         <td>Load average:</td>
-        <td>{{global.hosts.load}} </td>
-        <td style='color:{{#compare global.hostsEvolution.load ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.hostsEvolution.load}}
-            {{#compare global.hostsEvolution.load ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.load}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>{{global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
         </td>
       <tr>
       <tr>
-        <td>Net reception:</td>
-        <td>{{global.hosts.netReception}} KiB</td>
-        <td style='color:{{#compare global.hostsEvolution.netReception ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.hostsEvolution.netReception}}
-            {{#compare global.hostsEvolution.netReception ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.netReception}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>Network RX:</td>
+        <td>{{global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
         </td>
       <tr>
       <tr>
-        <td>Net transmission:</td>
-        <td>{{global.hosts.netTransmission}} KiB</td>
-        <td style='color:{{#compare global.hostsEvolution.netTransmission ">" 0}} red {{else}} green {{/compare}}'>
-          {{#if global.hostsEvolution.netTransmission}}
-            {{#compare global.hostsEvolution.netTransmission ">" 0}}+{{/compare}}
-            {{global.hostsEvolution.netTransmission}}%
-          {{else}}
-            0
-          {{/if}}
+        <td>Network TX:</td>
+        <td>{{global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
         </td>
       <tr>
     </table>

@@ -432,7 +342,7 @@
       </tr>
      {{/each}}
       <tr>
-        <td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Net reception</td>
+        <td rowspan='{{math topHosts.netReception.length "+" 1}}' class="tableHeader">Network RX</td>
       </tr>
       {{#each topHosts.netReception}}
       <tr>

@@ -442,7 +352,7 @@
       </tr>
       {{/each}}
       <tr>
-        <td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Net transmission</td>
+        <td rowspan='{{math topHosts.netTransmission.length "+" 1}}' class="tableHeader">Network TX</td>
       </tr>
       {{#each topHosts.netTransmission}}
       <tr>

@@ -464,11 +374,11 @@
       <th>Name</th>
       <th>value</th>
       </tr>
-      {{#each topAllocation}}
+      {{#each topSrs}}
       <tr>
         <td>{{shortUUID this.uuid}}</td>
         <td>{{this.name}}</td>
-        <td>{{this.size}} GiB</td>
+        <td>{{this.value}} GiB</td>
       </tr>
       {{/each}}
     </table>

@@ -533,8 +443,8 @@
       <th>UUID</th>
       <th>Name</th>
       </tr>
-      {{#if vmsRessourcesEvolution.added}}
-      {{#each vmsRessourcesEvolution.added}}
+      {{#if vmsResourcesEvolution.added}}
+      {{#each vmsResourcesEvolution.added}}
       <tr>
         <td>{{shortUUID this.uuid}}</td>
         <td>{{this.name}}</td>

@@ -553,8 +463,8 @@
       <th>UUID</th>
       <th>Name</th>
       </tr>
-      {{#if vmsRessourcesEvolution.removed}}
-      {{#each vmsRessourcesEvolution.removed}}
+      {{#if vmsResourcesEvolution.removed}}
+      {{#each vmsResourcesEvolution.removed}}
       <tr>
         <td>{{shortUUID this.uuid}}</td>
         <td>{{this.name}}</td>

@@ -572,8 +482,8 @@
       <th>UUID</th>
       <th>Name</th>
       </tr>
-      {{#if hostsRessourcesEvolution.added}}
-      {{#each hostsRessourcesEvolution.added}}
+      {{#if hostsResourcesEvolution.added}}
+      {{#each hostsResourcesEvolution.added}}
       <tr>
         <td>{{shortUUID this.uuid}}</td>
         <td>{{this.name}}</td>

@@ -591,8 +501,8 @@
       <th>UUID</th>
       <th>Name</th>
       </tr>
-      {{#if hostsRessourcesEvolution.removed}}
-      {{#each hostsRessourcesEvolution.removed}}
+      {{#if hostsResourcesEvolution.removed}}
+      {{#each hostsResourcesEvolution.removed}}
       <tr>
         <td>{{shortUUID this.uuid}}</td>
         <td>{{this.name}}</td>

@@ -606,5 +516,81 @@
     </table>
   </div>
 </div>
+{{#if allResources}}
+<div class="page">
+  <div class="top allResources">
+    <hr color="#95a5a6" size="1px"/>
+    <h3 style="text-align: center;">All resources</h3>
+    <hr color="#95a5a6" size="1px"/>
+    <table>
+      <caption>VMs</caption>
+      <tr>
+        <th>UUID</th>
+        <th>Name</th>
+        <th>CPU</th>
+        <th>RAM (GiB)</th>
+        <th>Disk read (MiB)</th>
+        <th>Disk write (MiB)</th>
+        <th>Network RX (KiB)</th>
+        <th>Network TX (KiB)</th>
+      </tr>
+      {{#each allResources.vms}}
+      <tr>
+        <td>{{shortUUID this.uuid}}</td>
+        <td>{{this.name}}</td>
+        <td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
+        <td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
+        <td>{{normaliseValue this.diskRead}} {{normaliseEvolution this.evolution.diskRead}}</td>
+        <td>{{normaliseValue this.diskWrite}} {{normaliseEvolution this.evolution.diskWrite}}</td>
+        <td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
+        <td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
+      </tr>
+      {{/each}}
+    </table>
+    <table>
+      <caption>Hosts</caption>
+      <tr>
+        <th>UUID</th>
+        <th>Name</th>
+        <th>CPU</th>
+        <th>RAM (GiB)</th>
+        <th>Load average</th>
+        <th>Network RX (KiB)</th>
+        <th>Network TX (KiB)</th>
+      </tr>
+      {{#each allResources.hosts}}
+      <tr>
+        <td>{{shortUUID this.uuid}}</td>
+        <td>{{this.name}}</td>
+        <td>{{normaliseValue this.cpu}} % {{normaliseEvolution this.evolution.cpu}}</td>
+        <td>{{normaliseValue this.ram}} {{normaliseEvolution this.evolution.ram}}</td>
+        <td>{{normaliseValue this.load}} {{normaliseEvolution this.evolution.load}}</td>
+        <td>{{normaliseValue this.netReception}} {{normaliseEvolution this.evolution.netReception}}</td>
+        <td>{{normaliseValue this.netTransmission}} {{normaliseEvolution this.evolution.netTransmission}}</td>
+      </tr>
+      {{/each}}
+    </table>
+    <table>
+      <caption>SRs</caption>
+      <tr>
+        <th>UUID</th>
+        <th>Name</th>
+        <th>Total space (GiB)</th>
+        <th>Used space (GiB)</th>
+        <th>Free space (GiB)</th>
+      </tr>
+      {{#each allResources.srs}}
+      <tr>
+        <td>{{shortUUID this.uuid}}</td>
+        <td>{{this.name}}</td>
+        <td>{{normaliseValue this.total}} {{normaliseEvolution this.evolution.total}}</td>
+        <td>{{normaliseValue this.used}}</td>
+        <td>{{normaliseValue this.free}}</td>
+      </tr>
+      {{/each}}
+    </table>
+  </div>
+</div>
+{{/if}}
 </body>
 </html>

@@ -6,6 +6,7 @@ import {
   concat,
   differenceBy,
   filter,
+  find,
   forEach,
   isFinite,
   map,

@@ -67,6 +68,10 @@ export const configurationSchema = {
         type: 'string',
       },
     },
+    all: {
+      type: 'boolean',
+      description: "It includes all resources' stats if on.",
+    },
     periodicity: {
       type: 'string',
       enum: ['monthly', 'weekly'],

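The new `all` flag is what drives the "All resources" appendix added to the report template. A hypothetical configuration object matching the schema above (values are illustrative):

```js
const configuration = {
  all: true, // include per-VM, per-host and per-SR stats in the report
  periodicity: 'weekly', // or 'monthly'
}
```
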
@@ -88,12 +93,12 @@ Handlebars.registerHelper('compare', function (
   options
 ) {
   if (arguments.length < 3) {
-    throw new Error('Handlerbars Helper "compare" needs 2 parameters')
+    throw new Error('Handlebars Helper "compare" needs 2 parameters')
   }

   if (!compareOperators[operator]) {
     throw new Error(
-      `Handlerbars Helper "compare" doesn't know the operator ${operator}`
+      `Handlebars Helper "compare" doesn't know the operator ${operator}`
     )
   }

@@ -104,12 +109,12 @@ Handlebars.registerHelper('compare', function (

 Handlebars.registerHelper('math', function (lvalue, operator, rvalue, options) {
   if (arguments.length < 3) {
-    throw new Error('Handlerbars Helper "math" needs 2 parameters')
+    throw new Error('Handlebars Helper "math" needs 2 parameters')
   }

   if (!mathOperators[operator]) {
     throw new Error(
-      `Handlerbars Helper "math" doesn't know the operator ${operator}`
+      `Handlebars Helper "math" doesn't know the operator ${operator}`
     )
   }

@@ -122,6 +127,23 @@ Handlebars.registerHelper('shortUUID', uuid => {
   }
 })

+Handlebars.registerHelper(
+  'normaliseValue',
+  value => (isFinite(value) ? round(value, 2) : '-')
+)
+
+Handlebars.registerHelper(
+  'normaliseEvolution',
+  value =>
+    new Handlebars.SafeString(
+      isFinite(+value) && +value !== 0
+        ? value > 0
+          ? `(<b style="color: green;">▲ ${value}</b>)`
+          : `(<b style="color: red;">▼ ${String(value).slice(1)}</b>)`
+        : ''
+    )
+)
+
 // ===================================================================

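`normaliseEvolution` renders a signed percentage as a green ▲ or red ▼ badge and emits nothing for zero or non-finite deltas, which is what lets the templates above collapse their per-metric `#if`/`#compare` blocks into a single helper call. Roughly (inputs illustrative):

```js
// Sample renderings:
// normaliseEvolution('12')   -> (<b style="color: green;">▲ 12</b>)
// normaliseEvolution('-3.5') -> (<b style="color: red;">▼ 3.5</b>)
// normaliseEvolution('NONE') -> '' (no badge rendered)
```
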
 function computeMean (values) {

@@ -137,7 +159,7 @@ function computeMean (values) {
   return sum / n
 }

-const computeDoubleMean = val => computeMean(val.map(computeMean))
+const computeDoubleMean = val => computeMean(map(val, computeMean))

 function computeMeans (objects, options) {
   return zipObject(

@@ -170,13 +192,13 @@ function getTop (objects, options) {
   )
 }

-function conputePercentage (curr, prev, options) {
+function computePercentage (curr, prev, options) {
   return zipObject(
     options,
     map(
       options,
       opt =>
-        prev[opt] === 0
+        prev[opt] === 0 || prev[opt] === null
           ? 'NONE'
           : `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
     )

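The helper computes the classic relative delta, `(curr - prev) * 100 / prev`, rounded to two decimals, with `'NONE'` standing in when the previous value is 0 (and now also null). A quick worked check (values illustrative):

```js
computePercentage({ cpu: 50 }, { cpu: 40 }, ['cpu'])
// (50 - 40) * 100 / 40 = 25 -> { cpu: '25' }

computePercentage({ cpu: 50 }, { cpu: 0 }, ['cpu'])
// -> { cpu: 'NONE' } (avoids division by zero)
```
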
@@ -185,45 +207,76 @@ function conputePercentage (curr, prev, options) {

 function getDiff (oldElements, newElements) {
   return {
-    added: differenceBy(oldElements, newElements, 'uuid'),
-    removed: differenceBy(newElements, oldElements, 'uuid'),
+    added: differenceBy(newElements, oldElements, 'uuid'),
+    removed: differenceBy(oldElements, newElements, 'uuid'),
   }
 }

+function getMemoryUsedMetric ({ memory, memoryFree = memory }) {
+  return map(memory, (value, key) => value - memoryFree[key])
+}

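The `getDiff` fix swaps the arguments so that an element present only in `newElements` counts as added and one present only in `oldElements` counts as removed. A quick check (UUIDs illustrative):

```js
getDiff([{ uuid: 'a' }, { uuid: 'b' }], [{ uuid: 'b' }, { uuid: 'c' }])
// -> { added: [{ uuid: 'c' }], removed: [{ uuid: 'a' }] }
```
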
 // ===================================================================

-function getVmsStats ({ runningVms, xo }) {
-  return Promise.all(
-    map(runningVms, async vm => {
-      const vmStats = await xo.getXapiVmStats(vm, 'days')
-      return {
-        uuid: vm.uuid,
-        name: vm.name_label,
-        cpu: computeDoubleMean(vmStats.stats.cpus),
-        ram: computeMean(vmStats.stats.memoryUsed) / gibPower,
-        diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
-        diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
-        netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
-        netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
-      }
-    })
+async function getVmsStats ({ runningVms, xo }) {
+  return orderBy(
+    await Promise.all(
+      map(runningVms, async vm => {
+        const vmStats = await xo.getXapiVmStats(vm, 'days')
+        return {
+          uuid: vm.uuid,
+          name: vm.name_label,
+          cpu: computeDoubleMean(vmStats.stats.cpus),
+          ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
+          diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
+          diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
+          netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
+          netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
+        }
+      })
+    ),
+    'name',
+    'asc'
   )
 }

-function getHostsStats ({ runningHosts, xo }) {
-  return Promise.all(
-    map(runningHosts, async host => {
-      const hostStats = await xo.getXapiHostStats(host, 'days')
-      return {
-        uuid: host.uuid,
-        name: host.name_label,
-        cpu: computeDoubleMean(hostStats.stats.cpus),
-        ram: computeMean(hostStats.stats.memoryUsed) / gibPower,
-        load: computeMean(hostStats.stats.load),
-        netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
-        netTransmission: computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
-      }
-    })
-  )
-}
+async function getHostsStats ({ runningHosts, xo }) {
+  return orderBy(
+    await Promise.all(
+      map(runningHosts, async host => {
+        const hostStats = await xo.getXapiHostStats(host, 'days')
+        return {
+          uuid: host.uuid,
+          name: host.name_label,
+          cpu: computeDoubleMean(hostStats.stats.cpus),
+          ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
+          load: computeMean(hostStats.stats.load),
+          netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
+          netTransmission:
+            computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
+        }
+      })
+    ),
+    'name',
+    'asc'
+  )
+}

+function getSrsStats (xoObjects) {
+  return orderBy(
+    map(filter(xoObjects, { type: 'SR' }), sr => {
+      const total = sr.size / gibPower
+      const used = sr.physical_usage / gibPower
+      return {
+        uuid: sr.uuid,
+        name: sr.name_label,
+        total,
+        used,
+        free: total - used,
+      }
+    }),
+    'total',
+    'desc'
+  )
+}

@@ -303,20 +356,21 @@ function getTopHosts ({ hostsStats, xo }) {
   ])
 }

-function getMostAllocatedSpaces ({ disks, xo }) {
-  return map(orderBy(disks, ['size'], ['desc']).slice(0, 3), disk => ({
-    uuid: disk.uuid,
-    name: disk.name_label,
-    size: round(disk.size / gibPower, 2),
-  }))
+function getTopSrs ({ srsStats, xo }) {
+  return getTop(srsStats, ['total']).total
 }

 async function getHostsMissingPatches ({ runningHosts, xo }) {
   const hostsMissingPatches = await Promise.all(
     map(runningHosts, async host => {
-      const hostsPatches = await xo
+      let hostsPatches = await xo
         .getXapi(host)
         .listMissingPoolPatchesOnHost(host._xapiId)

+      if (host.license_params.sku_type === 'free') {
+        hostsPatches = filter(hostsPatches, { paid: false })
+      }
+
       if (hostsPatches.length > 0) {
         return {
           uuid: host.uuid,

@@ -347,46 +401,75 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {

     const prevDate = oldStats.style.currDate

+    const resourcesOptions = {
+      vms: [
+        'cpu',
+        'ram',
+        'diskRead',
+        'diskWrite',
+        'netReception',
+        'netTransmission',
+      ],
+      hosts: ['cpu', 'ram', 'load', 'netReception', 'netTransmission'],
+      srs: ['total'],
+    }
+
     const vmsEvolution = {
       number: newStatsVms.number - oldStatsVms.number,
-      ...conputePercentage(newStatsVms, oldStatsVms, [
-        'cpu',
-        'ram',
-        'diskRead',
-        'diskWrite',
-        'netReception',
-        'netTransmission',
-      ]),
+      ...computePercentage(newStatsVms, oldStatsVms, resourcesOptions.vms),
     }

     const hostsEvolution = {
       number: newStatsHosts.number - oldStatsHosts.number,
-      ...conputePercentage(newStatsHosts, oldStatsHosts, [
-        'cpu',
-        'ram',
-        'load',
-        'netReception',
-        'netTransmission',
-      ]),
+      ...computePercentage(
+        newStatsHosts,
+        oldStatsHosts,
+        resourcesOptions.hosts
+      ),
     }

-    const vmsRessourcesEvolution = getDiff(
+    const vmsResourcesEvolution = getDiff(
       oldStatsVms.allVms,
       newStatsVms.allVms
     )
-    const hostsRessourcesEvolution = getDiff(
+    const hostsResourcesEvolution = getDiff(
       oldStatsHosts.allHosts,
       newStatsHosts.allHosts
     )

     const usersEvolution = getDiff(oldStats.users, newStats.users)

+    const newAllResourcesStats = newStats.allResources
+    const oldAllResourcesStats = oldStats.allResources
+
+    // adding for each resource its evolution
+    if (
+      newAllResourcesStats !== undefined &&
+      oldAllResourcesStats !== undefined
+    ) {
+      forEach(newAllResourcesStats, (resource, key) => {
+        const option = resourcesOptions[key]
+
+        if (option !== undefined) {
+          forEach(resource, newItem => {
+            const oldItem = find(oldAllResourcesStats[key], {
+              uuid: newItem.uuid,
+            })
+
+            if (oldItem !== undefined) {
+              newItem.evolution = computePercentage(newItem, oldItem, option)
+            }
+          })
+        }
+      })
+    }
+
     return {
       vmsEvolution,
       hostsEvolution,
       prevDate,
-      vmsRessourcesEvolution,
-      hostsRessourcesEvolution,
+      vmsResourcesEvolution,
+      hostsResourcesEvolution,
       usersEvolution,
     }
   } catch (err) {

@@ -394,7 +477,7 @@ async function computeEvolution ({ storedStatsPath, ...newStats }) {
   }
 }

-async function dataBuilder ({ xo, storedStatsPath }) {
+async function dataBuilder ({ xo, storedStatsPath, all }) {
   const xoObjects = values(xo.getObjects())
   const runningVms = filter(xoObjects, { type: 'VM', power_state: 'Running' })
   const haltedVms = filter(xoObjects, { type: 'VM', power_state: 'Halted' })

@@ -403,18 +486,17 @@ async function dataBuilder ({ xo, storedStatsPath }) {
     power_state: 'Running',
   })
   const haltedHosts = filter(xoObjects, { type: 'host', power_state: 'Halted' })
-  const disks = filter(xoObjects, { type: 'SR' })
   const [
     users,
     vmsStats,
     hostsStats,
-    topAllocation,
+    srsStats,
     hostsMissingPatches,
   ] = await Promise.all([
     xo.getAllUsers(),
     getVmsStats({ xo, runningVms }),
     getHostsStats({ xo, runningHosts }),
-    getMostAllocatedSpaces({ xo, disks }),
+    getSrsStats(xoObjects),
     getHostsMissingPatches({ xo, runningHosts }),
   ])

@@ -423,35 +505,50 @@ async function dataBuilder ({ xo, storedStatsPath }) {
     globalHostsStats,
     topVms,
     topHosts,
+    topSrs,
     usersEmail,
   ] = await Promise.all([
     computeGlobalVmsStats({ xo, vmsStats, haltedVms }),
     computeGlobalHostsStats({ xo, hostsStats, haltedHosts }),
     getTopVms({ xo, vmsStats }),
     getTopHosts({ xo, hostsStats }),
+    getTopSrs({ xo, srsStats }),
     getAllUsersEmail(users),
   ])

+  let allResources
+  if (all) {
+    allResources = {
+      vms: vmsStats,
+      hosts: hostsStats,
+      srs: srsStats,
+      date: currDate,
+    }
+  }
+
   const evolution = await computeEvolution({
+    allResources,
     storedStatsPath,
     hosts: globalHostsStats,
     usersEmail,
     vms: globalVmsStats,
   })

-  const data = {
+  return {
+    allResources,
     global: {
       vms: globalVmsStats,
       hosts: globalHostsStats,
       vmsEvolution: evolution && evolution.vmsEvolution,
       hostsEvolution: evolution && evolution.hostsEvolution,
     },
-    topVms,
     topHosts,
+    topSrs,
+    topVms,
     hostsMissingPatches,
     usersEmail,
-    topAllocation,
-    vmsRessourcesEvolution: evolution && evolution.vmsRessourcesEvolution,
-    hostsRessourcesEvolution: evolution && evolution.hostsRessourcesEvolution,
+    vmsResourcesEvolution: evolution && evolution.vmsResourcesEvolution,
+    hostsResourcesEvolution: evolution && evolution.hostsResourcesEvolution,
     usersEvolution: evolution && evolution.usersEvolution,
     style: {
       imgXo,

@@ -460,8 +557,6 @@ async function dataBuilder ({ xo, storedStatsPath }) {
       page: '{{page}}',
     },
   }
-
-  return data
 }

 // ===================================================================

@@ -472,6 +567,10 @@ class UsageReportPlugin {
     this._dir = getDataDir
     // Defined in configure().
     this._conf = null
+    this._xo.addApiMethod(
+      'plugin.usageReport.send',
+      this._sendReport.bind(this, false)
+    )
   }

   configure (configuration, state) {

@@ -485,7 +584,7 @@ class UsageReportPlugin {
       configuration.periodicity === 'monthly' ? '00 06 1 * *' : '00 06 * * 0'
     ).createJob(async () => {
       try {
-        await this._sendReport()
+        await this._sendReport(true)
       } catch (error) {
         console.error(
           '[WARN] scheduled function:',

@@ -511,13 +610,14 @@ class UsageReportPlugin {
   }

   test () {
-    return this._sendReport()
+    return this._sendReport(true)
   }

-  async _sendReport () {
+  async _sendReport (storeData) {
     const data = await dataBuilder({
       xo: this._xo,
       storedStatsPath: this._storedStatsPath,
+      all: this._conf.all,
     })

     await Promise.all([

@@ -537,10 +637,11 @@ class UsageReportPlugin {
         },
       ],
     }),
-      storeStats({
-        data,
-        storedStatsPath: this._storedStatsPath,
-      }),
+      storeData &&
+        storeStats({
+          data,
+          storedStatsPath: this._storedStatsPath,
+        }),
     ])
   }
 }

packages/xo-server/.babelrc.js (new file, 3 lines)

@@ -0,0 +1,3 @@
+module.exports = require('../../@xen-orchestra/babel-config')(
+  require('./package.json')
+)

@@ -8,12 +8,14 @@ try {
   const filtered = frames.filter(function (frame) {
     const name = frame && frame.getFileName()

-    return (// has a filename
+    return (
+      // has a filename
       name &&
       // contains a separator (no internal modules)
       name.indexOf(sep) !== -1 &&
       // does not start with `internal`
-      name.lastIndexOf('internal', 0) !== -1)
+      name.lastIndexOf('internal', 0) !== -1
+    )
   })

   // depd (used amongst other by express requires at least 3 frames

packages/xo-server/bin/run-vhd-test (new executable file, 13 lines)

@@ -0,0 +1,13 @@
+#!/usr/bin/env node
+
+'use strict'
+
+global.Promise = require('bluebird')
+
+
+process.on('unhandledRejection', function (reason) {
+  console.warn('[Warn] Possibly unhandled rejection:', reason && reason.stack || reason)
+})
+
+
+require("exec-promise")(require("../dist/vhd-test").default)

@@ -1,6 +1,6 @@
 {
   "name": "xo-server",
-  "version": "5.16.0",
+  "version": "5.18.3",
   "license": "AGPL-3.0",
   "description": "Server part of Xen-Orchestra",
   "keywords": [

@@ -15,7 +15,6 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "author": "Julien Fontanet <julien.fontanet@vates.fr>",
   "preferGlobal": true,
   "files": [
     "better-stacks.js",

@@ -29,16 +28,16 @@
     "bin": "bin"
   },
   "engines": {
-    "node": ">=4.5"
+    "node": ">=6"
   },
   "dependencies": {
+    "@babel/polyfill": "7.0.0-beta.44",
     "@marsaud/smb2-promise": "^0.2.1",
     "@nraynaud/struct-fu": "^1.0.1",
-    "@xen-orchestra/cron": "^1.0.0",
+    "@xen-orchestra/cron": "^1.0.3",
     "ajv": "^6.1.1",
     "app-conf": "^0.5.0",
     "archiver": "^2.1.0",
-    "babel-runtime": "^6.26.0",
+    "async-iterator-to-stream": "^1.0.1",
     "base64url": "^2.0.0",
     "bind-property-descriptor": "^1.0.0",
     "blocked": "^1.2.1",

@@ -54,7 +53,7 @@
     "escape-string-regexp": "^1.0.5",
     "event-to-promise": "^0.8.0",
     "exec-promise": "^0.7.0",
-    "execa": "^0.9.0",
+    "execa": "^0.10.0",
     "express": "^4.16.2",
     "express-session": "^1.15.6",
     "fatfs": "^0.10.4",

@@ -66,19 +65,20 @@
     "highland": "^2.11.1",
     "http-proxy": "^1.16.2",
     "http-request-plus": "^0.5.0",
-    "http-server-plus": "^0.8.0",
+    "http-server-plus": "^0.10.0",
     "human-format": "^0.10.0",
     "is-redirect": "^1.0.0",
+    "jest-worker": "^22.4.3",
     "js-yaml": "^3.10.0",
     "json-rpc-peer": "^0.15.3",
-    "json5": "^0.5.1",
+    "json5": "^1.0.0",
     "julien-f-source-map-support": "0.1.0",
     "julien-f-unzip": "^0.2.1",
     "kindof": "^2.0.0",
     "level": "^3.0.0",
     "level-party": "^3.0.4",
     "level-sublevel": "^6.6.1",
-    "limit-concurrency-decorator": "^0.3.0",
+    "limit-concurrency-decorator": "^0.4.0",
     "lodash": "^4.17.4",
     "make-error": "^1",
     "micromatch": "^3.1.4",

@@ -102,13 +102,15 @@
     "serve-static": "^1.13.1",
     "split-lines": "^1.1.0",
     "stack-chain": "^2.0.0",
+    "stoppable": "^1.0.5",
+    "struct-fu": "^1.2.0",
     "tar-stream": "^1.5.5",
     "through2": "^2.0.3",
     "tmp": "^0.0.33",
     "uuid": "^3.0.1",
-    "value-matcher": "^0.1.0",
-    "ws": "^4.0.0",
-    "xen-api": "^0.16.5",
+    "value-matcher": "^0.2.0",
+    "ws": "^5.0.0",
+    "xen-api": "^0.16.9",
     "xml2js": "^0.4.19",
     "xo-acl-resolver": "^0.2.3",
     "xo-collection": "^0.4.1",

@@ -117,13 +119,18 @@
     "xo-vmdk-to-vhd": "0.0.12"
   },
   "devDependencies": {
-    "babel-cli": "^6.26.0",
-    "babel-core": "^6.26.0",
+    "@babel/cli": "7.0.0-beta.44",
+    "@babel/core": "7.0.0-beta.44",
+    "@babel/plugin-proposal-decorators": "7.0.0-beta.44",
+    "@babel/plugin-proposal-export-default-from": "7.0.0-beta.44",
+    "@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.44",
+    "@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
+    "@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.44",
+    "@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.44",
+    "@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.44",
+    "@babel/preset-env": "7.0.0-beta.44",
+    "@babel/preset-flow": "7.0.0-beta.44",
     "babel-plugin-lodash": "^3.3.2",
-    "babel-plugin-transform-decorators-legacy": "^1.3.4",
-    "babel-plugin-transform-runtime": "^6.23.0",
-    "babel-preset-env": "^1.6.1",
-    "babel-preset-stage-0": "^6.24.1",
     "cross-env": "^5.1.3",
     "index-modules": "^0.3.0",
     "rimraf": "^2.6.2"

@@ -136,23 +143,5 @@
     "predev": "yarn run prebuild",
     "prepublishOnly": "yarn run build",
     "start": "node bin/xo-server"
-  },
-  "babel": {
-    "plugins": [
-      "lodash",
-      "transform-decorators-legacy",
-      "transform-runtime"
-    ],
-    "presets": [
-      [
-        "env",
-        {
-          "targets": {
-            "node": 4
-          }
-        }
-      ],
-      "stage-0"
-    ]
+  }
 }

packages/xo-server/src/api/backup-ng.js (new file, 173 lines)

@@ -0,0 +1,173 @@
+export function createJob ({ schedules, ...job }) {
+  job.userId = this.user.id
+  return this.createBackupNgJob(job, schedules)
+}
+
+createJob.permission = 'admin'
+createJob.params = {
+  compression: {
+    enum: ['', 'native'],
+    optional: true,
+  },
+  mode: {
+    enum: ['full', 'delta'],
+  },
+  name: {
+    type: 'string',
+    optional: true,
+  },
+  remotes: {
+    type: 'object',
+    optional: true,
+  },
+  schedules: {
+    type: 'object',
+    optional: true,
+  },
+  settings: {
+    type: 'object',
+  },
+  srs: {
+    type: 'object',
+    optional: true,
+  },
+  vms: {
+    type: 'object',
+  },
+}
+
+export function migrateLegacyJob ({ id }) {
+  return this.migrateLegacyBackupJob(id)
+}
+migrateLegacyJob.permission = 'admin'
+migrateLegacyJob.params = {
+  id: {
+    type: 'string',
+  },
+}
+
+export function deleteJob ({ id }) {
+  return this.deleteBackupNgJob(id)
+}
+deleteJob.permission = 'admin'
+deleteJob.params = {
+  id: {
+    type: 'string',
+  },
+}
+
+export function editJob (props) {
+  return this.updateJob(props)
+}
+
+editJob.permission = 'admin'
+editJob.params = {
+  compression: {
+    enum: ['', 'native'],
+    optional: true,
+  },
+  id: {
+    type: 'string',
+  },
+  mode: {
+    enum: ['full', 'delta'],
+    optional: true,
+  },
+  name: {
+    type: 'string',
+    optional: true,
+  },
+  remotes: {
+    type: 'object',
+    optional: true,
+  },
+  settings: {
+    type: 'object',
+    optional: true,
+  },
+  srs: {
+    type: 'object',
+    optional: true,
+  },
+  vms: {
+    type: 'object',
+    optional: true,
+  },
+}
+
+export function getAllJobs () {
+  return this.getAllJobs('backup')
+}
+
+getAllJobs.permission = 'admin'
+
+export function getJob ({ id }) {
+  return this.getJob(id, 'backup')
+}
+
+getJob.permission = 'admin'
+
+getJob.params = {
+  id: {
+    type: 'string',
+  },
+}
+
+export async function runJob ({ id, schedule }) {
+  return this.runJobSequence([id], await this.getSchedule(schedule))
+}
+
+runJob.permission = 'admin'
+
+runJob.params = {
+  id: {
+    type: 'string',
+  },
+  schedule: {
+    type: 'string',
+  },
+}
+
+// -----------------------------------------------------------------------------
+
+export function deleteVmBackup ({ id }) {
+  return this.deleteVmBackupNg(id)
+}
+
+deleteVmBackup.permission = 'admin'
+
+deleteVmBackup.params = {
+  id: {
+    type: 'string',
+  },
+}
+
+export function listVmBackups ({ remotes }) {
+  return this.listVmBackupsNg(remotes)
+}
+
+listVmBackups.permission = 'admin'
+
+listVmBackups.params = {
+  remotes: {
+    type: 'array',
+    items: {
+      type: 'string',
+    },
+  },
+}
+
+export function importVmBackup ({ id, sr }) {
+  return this.importVmBackupNg(id, sr)
+}
+
+importVmBackup.permission = 'admin'
+
+importVmBackup.params = {
+  id: {
+    type: 'string',
+  },
+  sr: {
+    type: 'string',
+  },
+}

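These exports become the backup-NG API methods of xo-server. Presumably they can be driven from xo-cli like any other method, following the `param=value` convention shown earlier in this changeset; a hypothetical invocation (the method namespace and IDs are assumptions, not shown in this diff):

```
> xo-cli backupNg.runJob id=<job-uuid> schedule=<schedule-uuid>
```
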
@@ -242,7 +242,7 @@ emergencyShutdownHost.resolve = {
 // -------------------------------------------------------------------

 export function stats ({ host, granularity }) {
-  return this.getXapiHostStats(host, granularity)
+  return this.getXapiHostStats(host._xapiId, granularity)
 }

 stats.description = 'returns statistic of the host'

@@ -1,14 +1,14 @@
 // FIXME so far, no acls for jobs

 export async function getAll () {
-  return /* await */ this.getAllJobs()
+  return /* await */ this.getAllJobs('call')
 }

 getAll.permission = 'admin'
 getAll.description = 'Gets all available jobs'

 export async function get (id) {
-  return /* await */ this.getJob(id)
+  return /* await */ this.getJob(id, 'call')
 }

 get.permission = 'admin'

@@ -99,11 +99,14 @@ set.params = {
 // -------------------------------------------------------------------

 export function get ({ id }) {
+  const { user } = this
+  if (!user) {
+    throw unauthorized()
+  }
+
   return this.getResourceSet(id)
 }

 get.permission = 'admin'

 get.params = {
   id: {
     type: 'string',

@@ -17,41 +17,44 @@ get.params = {
|
||||
id: { type: 'string' },
|
||||
}
|
||||
|
||||
export async function create ({ jobId, cron, enabled, name, timezone }) {
|
||||
return /* await */ this.createSchedule(this.session.get('user_id'), {
|
||||
job: jobId,
|
||||
export function create ({ cron, enabled, jobId, name, timezone }) {
|
||||
return this.createSchedule({
|
||||
cron,
|
||||
enabled,
|
||||
jobId,
|
||||
name,
|
||||
timezone,
|
||||
userId: this.session.get('user_id'),
|
||||
})
|
||||
}
|
||||
|
||||
create.permission = 'admin'
|
||||
create.description = 'Creates a new schedule'
|
||||
create.params = {
|
||||
jobId: { type: 'string' },
|
||||
cron: { type: 'string' },
|
||||
enabled: { type: 'boolean', optional: true },
|
||||
jobId: { type: 'string' },
|
||||
name: { type: 'string', optional: true },
|
||||
timezone: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
export async function set ({ id, jobId, cron, enabled, name, timezone }) {
|
||||
await this.updateSchedule(id, { job: jobId, cron, enabled, name, timezone })
|
||||
export async function set ({ cron, enabled, id, jobId, name, timezone }) {
|
||||
await this.updateSchedule({ cron, enabled, id, jobId, name, timezone })
|
||||
}
|
||||
|
||||
set.permission = 'admin'
|
||||
set.description = 'Modifies an existing schedule'
|
||||
set.params = {
|
||||
id: { type: 'string' },
|
||||
jobId: { type: 'string', optional: true },
|
||||
cron: { type: 'string', optional: true },
|
||||
enabled: { type: 'boolean', optional: true },
|
||||
id: { type: 'string' },
|
||||
jobId: { type: 'string', optional: true },
|
||||
name: { type: 'string', optional: true },
|
||||
timezone: { type: 'string', optional: true },
|
||||
}
|
||||
|
||||
async function delete_ ({ id }) {
|
||||
await this.removeSchedule(id)
|
||||
await this.deleteSchedule(id)
|
||||
}
|
||||
|
||||
delete_.permission = 'admin'
|
||||
|
||||
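For reference, a usage sketch of the reshaped `schedule.create` signature above (all values invented):

```js
// Invented values; `jobId` must reference an existing job.
const schedule = await xo.call('schedule.create', {
  jobId: 'some-job-id',
  cron: '0 2 * * *', // every day at 02:00
  enabled: true,
  name: 'nightly backup',
  timezone: 'Europe/Paris',
})
```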
@@ -1,30 +0,0 @@
export async function enable ({ id }) {
  const schedule = await this.getSchedule(id)
  schedule.enabled = true
  await this.updateSchedule(id, schedule)
}

enable.permission = 'admin'
enable.description = "Enables a schedule to run its job as scheduled"
enable.params = {
  id: { type: 'string' },
}

export async function disable ({ id }) {
  const schedule = await this.getSchedule(id)
  schedule.enabled = false
  await this.updateSchedule(id, schedule)
}

disable.permission = 'admin'
disable.description = 'Disables a schedule'
disable.params = {
  id: { type: 'string' },
}

export function getScheduleTable () {
  return this.scheduleTable
}

getScheduleTable.permission = 'admin'
getScheduleTable.description = 'Get a map of existing schedules enabled/disabled state'

@@ -189,6 +189,7 @@ export async function createNfs ({
  server,
  serverPath,
  nfsVersion,
  nfsOptions,
}) {
  const xapi = this.getXapi(host)

@@ -202,6 +203,11 @@ export async function createNfs ({
    deviceConfig.nfsversion = nfsVersion
  }

  // if NFS options given
  if (nfsOptions) {
    deviceConfig.options = nfsOptions
  }

  const srRef = await xapi.call(
    'SR.create',
    host._xapiRef,
@@ -226,6 +232,7 @@ createNfs.params = {
  server: { type: 'string' },
  serverPath: { type: 'string' },
  nfsVersion: { type: 'string', optional: true },
  nfsOptions: { type: 'string', optional: true },
}

createNfs.resolve = {
@@ -241,7 +248,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
  const xapi = this.getXapi(host)

  const deviceConfig = {
    scsiId,
    SCSIid: scsiId,
  }

  const srRef = await xapi.call(
@@ -251,7 +258,7 @@ export async function createHba ({ host, nameLabel, nameDescription, scsiId }) {
    '0',
    nameLabel,
    nameDescription,
    'lvmoohba', // SR LVM over HBA
    'lvmohba', // SR LVM over HBA
    'user', // recommended by Citrix
    true,
    {}
@@ -366,7 +373,7 @@ export async function probeHba ({ host }) {
  let xml

  try {
    await xapi.call('SR.probe', host._xapiRef, 'type', {})
    await xapi.call('SR.probe', host._xapiRef, {}, 'lvmohba', {})

    throw new Error('the call above should have thrown an error')
  } catch (error) {
@@ -382,7 +389,7 @@ export async function probeHba ({ host }) {
    hbaDevices.push({
      hba: hbaDevice.hba.trim(),
      path: hbaDevice.path.trim(),
      scsciId: hbaDevice.SCSIid.trim(),
      scsiId: hbaDevice.SCSIid.trim(),
      size: hbaDevice.size.trim(),
      vendor: hbaDevice.vendor.trim(),
    })
@@ -487,8 +494,8 @@ export async function probeIscsiIqns ({

  // if a user and password are given
  if (chapUser && chapPassword) {
    deviceConfig.chapUser = chapUser
    deviceConfig.chapPassword = chapPassword
    deviceConfig.chapuser = chapUser
    deviceConfig.chappassword = chapPassword
  }

  // if a port other than the default iSCSI port is given
@@ -668,6 +675,34 @@ probeIscsiExists.resolve = {
  host: ['host', 'host', 'administrate'],
}

// -------------------------------------------------------------------
// This function helps detect whether this HBA already exists in XAPI.
// It returns a list of SR UUIDs, empty if there are no existing connections.

export async function probeHbaExists ({ host, scsiId }) {
  const xapi = this.getXapi(host)

  const deviceConfig = {
    SCSIid: scsiId,
  }

  const xml = parseXml(
    await xapi.call('SR.probe', host._xapiRef, deviceConfig, 'lvmohba', {})
  )

  // get the UUID of SR connected to this LUN
  return ensureArray(xml.SRlist.SR).map(sr => ({ uuid: sr.UUID.trim() }))
}

probeHbaExists.params = {
  host: { type: 'string' },
  scsiId: { type: 'string' },
}

probeHbaExists.resolve = {
  host: ['host', 'host', 'administrate'],
}

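A hedged sketch of how a client might use `probeHbaExists` (ids invented); an empty result means no SR is connected to the LUN yet:

```js
// Invented ids; `scsiId` would come from a previous sr.probeHba call.
const existing = await xo.call('sr.probeHbaExists', {
  host: 'some-host-id',
  scsiId: 'some-scsi-id',
})
if (existing.length === 0) {
  // no SR uses this LUN yet: safe to create one
}
```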
// -------------------------------------------------------------------
// This function helps detect whether this NFS SR already exists in XAPI.
// It returns a list of SR UUIDs, empty if there are no existing connections.
@@ -803,3 +838,23 @@ getUnhealthyVdiChainsLength.params = {
getUnhealthyVdiChainsLength.resolve = {
  sr: ['id', 'SR', 'operate'],
}

// -------------------------------------------------------------------

export function stats ({ sr, granularity }) {
  return this.getXapiSrStats(sr._xapiId, granularity)
}

stats.description = 'returns statistic of the sr'

stats.params = {
  id: { type: 'string' },
  granularity: {
    type: 'string',
    optional: true,
  },
}

stats.resolve = {
  sr: ['id', 'SR', 'view'],
}

@@ -65,7 +65,11 @@ export async function copyVm ({ vm, sr }) {
    console.log('export delta VM...')
    const input = await srcXapi.exportDeltaVm(vm)
    console.log('import delta VM...')
    await tgtXapi.deleteVm(await tgtXapi.importDeltaVm(input, { srId: sr }))
    const { transferSize, vm: copyVm } = await tgtXapi.importDeltaVm(input, {
      srId: sr,
    })
    console.log('transferred size:', transferSize)
    await tgtXapi.deleteVm(copyVm)
  }
}

@@ -160,12 +160,12 @@ export async function create (params) {
    ])
  }

  for (const vifId of vm.VIFs) {
    const vif = this.getObject(vifId, 'VIF')
  for (const vif of xapiVm.$VIFs) {
    xapi.xo.addObject(vif)
    await this.allocIpAddresses(
      vifId,
      concat(vif.allowedIpv4Addresses, vif.allowedIpv6Addresses)
    ).catch(() => xapi.deleteVif(vif._xapiId))
      vif.$id,
      concat(vif.ipv4_allowed, vif.ipv6_allowed)
    ).catch(() => xapi.deleteVif(vif))
  }

  if (params.bootAfterCreate) {
@@ -323,6 +323,7 @@ create.resolve = {
async function delete_ ({
  delete_disks, // eslint-disable-line camelcase
  force,
  forceDeleteDefaultTemplate,
  vm,

  deleteDisks = delete_disks,
@@ -356,12 +357,19 @@ async function delete_ ({
  )

  // Update resource sets
  const resourceSet = xapi.xo.getData(vm._xapiId, 'resourceSet')
  if (resourceSet != null) {
  if (
    vm.type === 'VM' && // only regular VMs
    xapi.xo.getData(vm._xapiId, 'resourceSet') != null
  ) {
    ;this.setVmResourceSet(vm._xapiId, null)::ignoreErrors()
  }

  return xapi.deleteVm(vm._xapiId, deleteDisks, force)
  return xapi.deleteVm(
    vm._xapiId,
    deleteDisks,
    force,
    forceDeleteDefaultTemplate
  )
}

delete_.params = {
@@ -376,6 +384,11 @@ delete_.params = {
    optional: true,
    type: 'boolean',
  },

  forceDeleteDefaultTemplate: {
    optional: true,
    type: 'boolean',
  },
}
delete_.resolve = {
  vm: ['id', ['VM', 'VM-snapshot', 'VM-template'], 'administrate'],
@@ -411,7 +424,9 @@ insertCd.params = {

insertCd.resolve = {
  vm: ['id', 'VM', 'operate'],
  vdi: ['cd_id', 'VDI', 'view'],
  // Not compatible with resource sets.
  // FIXME: find a workaround.
  vdi: ['cd_id', 'VDI', ''],
}

// -------------------------------------------------------------------
@@ -773,7 +788,7 @@ export function importDeltaBackup ({ sr, remote, filePath, mapVdisSrs }) {
    remoteId: remote,
    filePath,
    mapVdisSrs: mapVdisSrsXapi,
  })
  }).then(_ => _.vm)
}

importDeltaBackup.params = {
@@ -1054,12 +1069,12 @@ export function revert ({ snapshot, snapshotBefore }) {
}

revert.params = {
  id: { type: 'string' },
  snapshot: { type: 'string' },
  snapshotBefore: { type: 'boolean', optional: true },
}

revert.resolve = {
  snapshot: ['id', 'VM-snapshot', 'administrate'],
  snapshot: ['snapshot', 'VM-snapshot', 'administrate'],
}

// -------------------------------------------------------------------
@@ -1336,7 +1351,7 @@ detachPci.resolve = {
// -------------------------------------------------------------------

export function stats ({ vm, granularity }) {
  return this.getXapiVmStats(vm, granularity)
  return this.getXapiVmStats(vm._xapiId, granularity)
}

stats.description = 'returns statistics about the VM'

@@ -42,7 +42,9 @@ function handleGetAllObjects (req, res, { filter, limit }) {

export function getAllObjects ({ filter, limit, ndjson = false }) {
  return ndjson
    ? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then($getFrom => ({ $getFrom }))
    ? this.registerHttpRequest(handleGetAllObjects, { filter, limit }).then(
        $getFrom => ({ $getFrom })
      )
    : this.getObjects({ filter, limit })
}

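When `ndjson: true` is passed, the method returns only a `$getFrom` URL and the objects are streamed as newline-delimited JSON. A hedged client-side sketch (base URL invented):

```js
// Invented base URL; each non-empty line of the response is one JSON object.
const { $getFrom } = await xo.call('xo.getAllObjects', { ndjson: true })
const response = await fetch(new URL($getFrom, 'https://xo.example.org'))
const objects = (await response.text())
  .split('\n')
  .filter(line => line !== '')
  .map(JSON.parse)
```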
@@ -665,7 +665,9 @@ export const createSR = defer(async function (

  CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 0 }

  const tmpBoundObjectId = srs.join(',')
  const tmpBoundObjectId = `tmp_${srs.join(',')}_${Math.random()
    .toString(32)
    .slice(2)}`
  const license = await this.createBoundXosanTrialLicense({
    boundObjectId: tmpBoundObjectId,
  })

@@ -1,6 +1,6 @@
import { EventEmitter } from 'events'

import { createRawObject, noop } from './utils'
import { noop } from './utils'

// ===================================================================

@@ -8,7 +8,7 @@ export default class Connection extends EventEmitter {
  constructor () {
    super()

    this._data = createRawObject()
    this._data = { __proto__: null }
  }

  // Close the connection.

@@ -8,7 +8,7 @@ describe('debounce()', () => {
  let i

  class Foo {
    @debounce(1e1)
    @debounce(10)
    foo () {
      ++i
    }
@@ -18,22 +18,28 @@ describe('debounce()', () => {
    i = 0
  })

  it('works', done => {
    const foo = new Foo()
  it('works', () => {
    const savedNow = Date.now
    try {
      const now = Date.now()
      const mockDate = jest.fn()
      Date.now = mockDate
      const foo = new Foo()
      expect(i).toBe(0)

      expect(i).toBe(0)
      mockDate.mockReturnValueOnce(now)
      foo.foo()
      expect(i).toBe(1)

      foo.foo()
      expect(i).toBe(1)
      mockDate.mockReturnValueOnce(now + 2)
      foo.foo()
      expect(i).toBe(1)

      foo.foo()
      expect(i).toBe(1)

      setTimeout(() => {
        mockDate.mockReturnValueOnce(now + 2 + 10)
        foo.foo()
        expect(i).toBe(2)

        done()
      }, 2e1)
    } finally {
      Date.now = savedNow
    }
  })
})

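For context, a minimal sketch of a timestamp-based `@debounce` decorator that would satisfy this test; it is not necessarily the project's actual implementation:

```js
// Sketch only: drop calls made less than `duration` ms (per Date.now())
// after the last accepted call.
const debounce = duration => (target, name, descriptor) => {
  const fn = descriptor.value
  let lastCall = -Infinity
  descriptor.value = function (...args) {
    const now = Date.now()
    if (now - lastCall >= duration) {
      lastCall = now
      return fn.apply(this, args)
    }
  }
  return descriptor
}
```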
@@ -3,16 +3,17 @@ import bind from 'lodash/bind'
import blocked from 'blocked'
import createExpress from 'express'
import createLogger from 'debug'
import eventToPromise from 'event-to-promise'
import has from 'lodash/has'
import helmet from 'helmet'
import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
import WebSocket from 'ws'
import { compile as compilePug } from 'pug'
import { createServer as createProxyServer } from 'http-proxy'
import { fromEvent } from 'promise-toolbox'
import { join as joinPath } from 'path'

import JsonRpcPeer from 'json-rpc-peer'
@@ -22,7 +23,6 @@ import { ensureDir, readdir, readFile } from 'fs-extra'
import WebServer from 'http-server-plus'
import Xo from './xo'
import {
  createRawObject,
  forEach,
  isArray,
  isFunction,
@@ -103,7 +103,7 @@ function createExpressApp () {
}

async function setUpPassport (express, xo) {
  const strategies = createRawObject()
  const strategies = { __proto__: null }
  xo.registerPassportStrategy = strategy => {
    passport.use(strategy)

@@ -333,7 +333,7 @@ async function makeWebServerListen (
}

async function createWebServer ({ listen, listenOptions }) {
  const webServer = new WebServer()
  const webServer = stoppable(new WebServer())

  await Promise.all(
    mapToArray(listen, opts =>
@@ -538,9 +538,14 @@ export default async function main (args) {

  {
    const debug = createLogger('xo:perf')
    blocked(ms => {
      debug('blocked for %sms', ms | 0)
    })
    blocked(
      ms => {
        debug('blocked for %sms', ms | 0)
      },
      {
        threshold: 50,
      }
    )
  }

  const config = await loadConfiguration()
@@ -566,7 +571,7 @@ export default async function main (args) {
  const xo = new Xo(config)

  // Register web server close on XO stop.
  xo.on('stop', () => pFromCallback(cb => webServer.close(cb)))
  xo.on('stop', () => pFromCallback(cb => webServer.stop(cb)))

  // Connects to all registered servers.
  await xo.start()
@@ -645,7 +650,7 @@ export default async function main (args) {
    })
  })

  await eventToPromise(xo, 'stopped')
  await fromEvent(xo, 'stopped')

  debug('bye :-)')
}

@@ -1,186 +0,0 @@
import { BaseError } from 'make-error'
import { createPredicate } from 'value-matcher'
import { timeout } from 'promise-toolbox'
import { assign, filter, find, isEmpty, map, mapValues } from 'lodash'

import { crossProduct } from './math'
import { asyncMap, serializeError, thunkToArray } from './utils'

export class JobExecutorError extends BaseError {}
export class UnsupportedJobType extends JobExecutorError {
  constructor (job) {
    super('Unknown job type: ' + job.type)
  }
}
export class UnsupportedVectorType extends JobExecutorError {
  constructor (vector) {
    super('Unknown vector type: ' + vector.type)
  }
}

// ===================================================================

const paramsVectorActionsMap = {
  extractProperties ({ mapping, value }) {
    return mapValues(mapping, key => value[key])
  },
  crossProduct ({ items }) {
    return thunkToArray(
      crossProduct(map(items, value => resolveParamsVector.call(this, value)))
    )
  },
  fetchObjects ({ pattern }) {
    const objects = filter(this.xo.getObjects(), createPredicate(pattern))
    if (isEmpty(objects)) {
      throw new Error('no objects match this pattern')
    }
    return objects
  },
  map ({ collection, iteratee, paramName = 'value' }) {
    return map(resolveParamsVector.call(this, collection), value => {
      return resolveParamsVector.call(this, {
        ...iteratee,
        [paramName]: value,
      })
    })
  },
  set: ({ values }) => values,
}

export function resolveParamsVector (paramsVector) {
  const visitor = paramsVectorActionsMap[paramsVector.type]
  if (!visitor) {
    throw new Error(`Unsupported function '${paramsVector.type}'.`)
  }

  return visitor.call(this, paramsVector)
}

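A hedged illustration of how a params vector resolves, assuming `crossProduct` merges the objects of each combination (values invented):

```js
// Invented example: one params object per combination of the two sets.
const vector = {
  type: 'crossProduct',
  items: [
    { type: 'set', values: [{ vm: 'vm1' }, { vm: 'vm2' }] },
    { type: 'set', values: [{ sr: 'srA' }] },
  ],
}
resolveParamsVector.call(jobExecutor, vector)
// → [{ vm: 'vm1', sr: 'srA' }, { vm: 'vm2', sr: 'srA' }]
```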
// ===================================================================

export default class JobExecutor {
  constructor (xo) {
    this.xo = xo

    // The logger is not available until Xo has started.
    xo.on('start', () =>
      xo.getLogger('jobs').then(logger => {
        this._logger = logger
      })
    )
  }

  async exec (job) {
    const runJobId = this._logger.notice(`Starting execution of ${job.id}.`, {
      event: 'job.start',
      userId: job.userId,
      jobId: job.id,
      key: job.key,
    })

    try {
      if (job.type === 'call') {
        const execStatus = await this._execCall(job, runJobId)

        this.xo.emit('job:terminated', execStatus)
      } else {
        throw new UnsupportedJobType(job)
      }

      this._logger.notice(`Execution terminated for ${job.id}.`, {
        event: 'job.end',
        runJobId,
      })
    } catch (error) {
      this._logger.error(`The execution of ${job.id} has failed.`, {
        event: 'job.end',
        runJobId,
        error: serializeError(error),
      })

      throw error
    }
  }

  async _execCall (job, runJobId) {
    const { paramsVector } = job
    const paramsFlatVector = paramsVector
      ? resolveParamsVector.call(this, paramsVector)
      : [{}] // One call with no parameters

    const connection = this.xo.createUserConnection()

    connection.set('user_id', job.userId)

    const schedule = find(await this.xo.getAllSchedules(), { job: job.id })

    const execStatus = {
      calls: {},
      runJobId,
      start: Date.now(),
      timezone: schedule !== undefined ? schedule.timezone : undefined,
    }

    await asyncMap(paramsFlatVector, params => {
      const runCallId = this._logger.notice(
        `Starting ${job.method} call. (${job.id})`,
        {
          event: 'jobCall.start',
          runJobId,
          method: job.method,
          params,
        }
      )

      const call = (execStatus.calls[runCallId] = {
        method: job.method,
        params,
        start: Date.now(),
      })
      let promise = this.xo.callApiMethod(
        connection,
        job.method,
        assign({}, params)
      )
      if (job.timeout) {
        promise = promise::timeout(job.timeout)
      }

      return promise.then(
        value => {
          this._logger.notice(
            `Call ${job.method} (${runCallId}) is a success. (${job.id})`,
            {
              event: 'jobCall.end',
              runJobId,
              runCallId,
              returnedValue: value,
            }
          )

          call.returnedValue = value
          call.end = Date.now()
        },
        reason => {
          this._logger.notice(
            `Call ${job.method} (${runCallId}) has failed. (${job.id})`,
            {
              event: 'jobCall.end',
              runJobId,
              runCallId,
              error: serializeError(reason),
            }
          )

          call.error = reason
          call.end = Date.now()
        }
      )
    })

    connection.close()
    execStatus.end = Date.now()

    return execStatus
  }
}
@@ -1,43 +0,0 @@
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'

import { parseProp } from './utils'

// ===================================================================

export default class Job extends Model {}

export class Jobs extends Collection {
  get Model () {
    return Job
  }

  async create (job) {
    // Serializes.
    job.paramsVector = JSON.stringify(job.paramsVector)
    return /* await */ this.add(new Job(job))
  }

  async save (job) {
    // Serializes.
    job.paramsVector = JSON.stringify(job.paramsVector)
    return /* await */ this.update(job)
  }

  async get (properties) {
    const jobs = await super.get(properties)

    // Deserializes.
    forEach(jobs, job => {
      job.paramsVector = parseProp('job', job, 'paramsVector', {})

      const { timeout } = job
      if (timeout !== undefined) {
        job.timeout = +timeout
      }
    })

    return jobs
  }
}
@@ -1,38 +0,0 @@
import Collection from '../collection/redis'
import Model from '../model'
import { forEach } from '../utils'

// ===================================================================

export default class Schedule extends Model {}

export class Schedules extends Collection {
  get Model () {
    return Schedule
  }

  create (userId, job, cron, enabled, name = undefined, timezone = undefined) {
    return this.add(
      new Schedule({
        userId,
        job,
        cron,
        enabled,
        name,
        timezone,
      })
    )
  }

  async save (schedule) {
    return /* await */ this.update(schedule)
  }

  async get (properties) {
    const schedules = await super.get(properties)
    forEach(schedules, schedule => {
      schedule.enabled = schedule.enabled === 'true'
    })
    return schedules
  }
}
packages/xo-server/src/patch.js (new file, 15 lines)
@@ -0,0 +1,15 @@
// @flow

// patch o: assign properties from p
// if the value of a p property is null, delete it from o
const patch = <T: {}>(o: T, p: $Shape<T>) => {
  Object.keys(p).forEach(k => {
    const v: any = p[k]
    if (v === null) {
      delete o[k]
    } else if (v !== undefined) {
      o[k] = v
    }
  })
}
export { patch as default }
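A quick usage sketch of `patch` (values invented): `null` deletes a property, `undefined` leaves it untouched.

```js
// Invented example values.
const schedule = { name: 'nightly', timezone: 'UTC', enabled: true }
patch(schedule, { name: 'weekly', timezone: null, cron: undefined })
// schedule is now { name: 'weekly', enabled: true }
```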
@@ -1,50 +1,58 @@
import eventToPromise from 'event-to-promise'
import through2 from 'through2'
import { ignoreErrors } from 'promise-toolbox'
// @flow

import { type Readable, type Writable } from 'stream'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'

import {
  addChecksumToReadStream,
  getPseudoRandomBytes,
  streamToBuffer,
  validChecksumOfReadStream,
} from '../utils'
import { getPseudoRandomBytes, streamToBuffer } from '../utils'

import { createChecksumStream, validChecksumOfReadStream } from './checksum'

type Data = Buffer | Readable | string
type FileDescriptor = {| fd: mixed, path: string |}
type LaxReadable = Readable & Object
type LaxWritable = Writable & Object

type File = FileDescriptor | string

const checksumFile = file => file + '.checksum'

export default class RemoteHandlerAbstract {
  constructor (remote) {
  _remote: Object
  constructor (remote: any) {
    this._remote = { ...remote, ...parse(remote.url) }
    if (this._remote.type !== this.type) {
      throw new Error('Incorrect remote type')
    }
  }

  get type () {
  get type (): string {
    throw new Error('Not implemented')
  }

  /**
   * Asks the handler to sync the state of the effective remote with its metadata
   */
  async sync () {
  async sync (): Promise<mixed> {
    return this._sync()
  }

  async _sync () {
  async _sync (): Promise<mixed> {
    throw new Error('Not implemented')
  }

  /**
   * Free the resources possibly dedicated to put the remote at work, when it is no longer needed
   */
  async forget () {
    return this._forget()
  async forget (): Promise<void> {
    await this._forget()
  }

  async _forget () {
  async _forget (): Promise<void> {
    throw new Error('Not implemented')
  }

  async test () {
  async test (): Promise<Object> {
    const testFileName = `${Date.now()}.test`
    const data = getPseudoRandomBytes(1024 * 1024)
    let step = 'write'
@@ -66,55 +74,81 @@ export default class RemoteHandlerAbstract {
        error: error.message || String(error),
      }
    } finally {
      ;this.unlink(testFileName)::ignoreErrors()
      ignoreErrors.call(this.unlink(testFileName))
    }
  }

  async outputFile (file, data, options) {
  async outputFile (file: string, data: Data, options?: Object): Promise<void> {
    return this._outputFile(file, data, {
      flags: 'wx',
      ...options,
    })
  }

  async _outputFile (file, data, options) {
  async _outputFile (file: string, data: Data, options?: Object): Promise<void> {
    const stream = await this.createOutputStream(file, options)
    const promise = eventToPromise(stream, 'finish')
    const promise = fromEvent(stream, 'finish')
    stream.end(data)
    return promise
    await promise
  }

  async readFile (file, options) {
  async readFile (file: string, options?: Object): Promise<Buffer> {
    return this._readFile(file, options)
  }

  _readFile (file, options) {
  _readFile (file: string, options?: Object): Promise<Buffer> {
    return this.createReadStream(file, options).then(streamToBuffer)
  }

  async rename (oldPath, newPath) {
    return this._rename(oldPath, newPath)
  async rename (
    oldPath: string,
    newPath: string,
    { checksum = false }: Object = {}
  ) {
    let p = this._rename(oldPath, newPath)
    if (checksum) {
      p = Promise.all([
        p,
        this._rename(checksumFile(oldPath), checksumFile(newPath)),
      ])
    }
    return p
  }

  async _rename (oldPath, newPath) {
  async _rename (oldPath: string, newPath: string) {
    throw new Error('Not implemented')
  }

  async list (dir = '.') {
    return this._list(dir)
  async list (
    dir: string = '.',
    {
      filter,
      prependDir = false,
    }: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
  ): Promise<string[]> {
    const entries = await this._list(dir)

    if (prependDir) {
      entries.forEach((entry, i) => {
        entries[i] = dir + '/' + entry
      })
    }

    return filter === undefined ? entries : entries.filter(filter)
  }

  async _list (dir) {
  async _list (dir: string): Promise<string[]> {
    throw new Error('Not implemented')
  }

  createReadStream (
    file,
    { checksum = false, ignoreMissingChecksum = false, ...options } = {}
  ) {
    file: string,
    { checksum = false, ignoreMissingChecksum = false, ...options }: Object = {}
  ): Promise<LaxReadable> {
    const path = typeof file === 'string' ? file : file.path
    const streamP = this._createReadStream(file, options).then(stream => {
      // detect early errors
      let promise = eventToPromise(stream, 'readable')
      let promise = fromEvent(stream, 'readable')

      // try to add the length prop if missing and not a range stream
      if (
@@ -124,11 +158,11 @@ export default class RemoteHandlerAbstract {
      ) {
        promise = Promise.all([
          promise,
          this.getSize(file)
            .then(size => {
          ignoreErrors.call(
            this.getSize(file).then(size => {
              stream.length = size
            })
            ::ignoreErrors(),
          ),
        ])
      }

@@ -140,13 +174,16 @@ export default class RemoteHandlerAbstract {
    }

    // avoid an unhandled rejection warning
    ;streamP::ignoreErrors()
    ignoreErrors.call(streamP)

    return this.readFile(`${file}.checksum`).then(
    return this.readFile(checksumFile(path)).then(
      checksum =>
        streamP.then(stream => {
          const { length } = stream
          stream = validChecksumOfReadStream(stream, String(checksum).trim())
          stream = (validChecksumOfReadStream(
            stream,
            String(checksum).trim()
          ): LaxReadable)
          stream.length = length

          return stream
@@ -160,18 +197,42 @@ export default class RemoteHandlerAbstract {
    )
  }

  async _createReadStream (file, options) {
  async _createReadStream (
    file: string,
    options?: Object
  ): Promise<LaxReadable> {
    throw new Error('Not implemented')
  }

  async refreshChecksum (path) {
    const stream = addChecksumToReadStream(await this.createReadStream(path))
    stream.resume() // start reading the whole file
    const checksum = await stream.checksum
    await this.outputFile(`${path}.checksum`, checksum)
  async openFile (path: string, flags?: string): Promise<FileDescriptor> {
    return { fd: await this._openFile(path, flags), path }
  }

  async createOutputStream (file, { checksum = false, ...options } = {}) {
  async _openFile (path: string, flags?: string): Promise<mixed> {
    throw new Error('Not implemented')
  }

  async closeFile (fd: FileDescriptor): Promise<void> {
    await this._closeFile(fd.fd)
  }

  async _closeFile (fd: mixed): Promise<void> {
    throw new Error('Not implemented')
  }

  async refreshChecksum (path: string): Promise<void> {
    const stream = (await this.createReadStream(path)).pipe(
      createChecksumStream()
    )
    stream.resume() // start reading the whole file
    await this.outputFile(checksumFile(path), await stream.checksum)
  }

  async createOutputStream (
    file: File,
    { checksum = false, ...options }: Object = {}
  ): Promise<LaxWritable> {
    const path = typeof file === 'string' ? file : file.path
    const streamP = this._createOutputStream(file, {
      flags: 'wx',
      ...options,
@@ -181,42 +242,47 @@ export default class RemoteHandlerAbstract {
      return streamP
    }

    const connectorStream = through2()
    const checksumStream = createChecksumStream()
    const forwardError = error => {
      connectorStream.emit('error', error)
      checksumStream.emit('error', error)
    }

    const streamWithChecksum = addChecksumToReadStream(connectorStream)
    streamWithChecksum.pipe(await streamP)
    const stream = await streamP
    stream.on('error', forwardError)
    checksumStream.pipe(stream)

    streamWithChecksum.checksum
      .then(value => this.outputFile(`${file}.checksum`, value))
    // $FlowFixMe
    checksumStream.checksumWritten = checksumStream.checksum
      .then(value => this.outputFile(checksumFile(path), value))
      .catch(forwardError)

    return connectorStream
    return checksumStream
  }

  async _createOutputStream (file, options) {
  async _createOutputStream (
    file: mixed,
    options?: Object
  ): Promise<LaxWritable> {
    throw new Error('Not implemented')
  }

  async unlink (file, { checksum = true } = {}) {
  async unlink (file: string, { checksum = true }: Object = {}): Promise<void> {
    if (checksum) {
      ;this._unlink(`${file}.checksum`)::ignoreErrors()
      ignoreErrors.call(this._unlink(checksumFile(file)))
    }

    return this._unlink(file)
    await this._unlink(file)
  }

  async _unlink (file) {
  async _unlink (file: mixed): Promise<void> {
    throw new Error('Not implemented')
  }

  async getSize (file) {
  async getSize (file: mixed): Promise<number> {
    return this._getSize(file)
  }

  async _getSize (file) {
  async _getSize (file: mixed): Promise<number> {
    throw new Error('Not implemented')
  }
}

packages/xo-server/src/remote-handlers/checksum.js (new file, 100 lines)
@@ -0,0 +1,100 @@
// @flow

// $FlowFixMe
import through2 from 'through2'
import { createHash } from 'crypto'
import { defer, fromEvent } from 'promise-toolbox'
import { invert } from 'lodash'
import { type Readable, type Transform } from 'stream'

// Format: $<algorithm>$<salt>$<encrypted>
//
// http://man7.org/linux/man-pages/man3/crypt.3.html#NOTES
const ALGORITHM_TO_ID = {
  md5: '1',
  sha256: '5',
  sha512: '6',
}

const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)

// Create a through stream which computes the checksum of all data going
// through.
//
// The `checksum` attribute is a promise which resolves at the end of the stream
// with a string representation of the checksum.
//
//    const source = ...
//    const checksumStream = source.pipe(createChecksumStream())
//    checksumStream.resume() // make the data flow without an output
//    console.log(await checksumStream.checksum)
export const createChecksumStream = (
  algorithm: string = 'md5'
): Transform & { checksum: Promise<string> } => {
  const algorithmId = ALGORITHM_TO_ID[algorithm]

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithm}`)
  }

  const hash = createHash(algorithm)
  const { promise, resolve, reject } = defer()

  const stream = through2(
    (chunk, enc, callback) => {
      hash.update(chunk)
      callback(null, chunk)
    },
    callback => {
      resolve(`$${algorithmId}$$${hash.digest('hex')}`)
      callback()
    }
  ).once('error', reject)
  stream.checksum = promise
  return stream
}

// Check if the checksum of a readable stream equals an expected checksum.
// The given stream is wrapped in a stream which emits an error event
// if the computed checksum is not equal to the expected checksum.
export const validChecksumOfReadStream = (
  stream: Readable,
  expectedChecksum: string
): Readable & { checksumVerified: Promise<void> } => {
  const algorithmId = expectedChecksum.slice(
    1,
    expectedChecksum.indexOf('$', 1)
  )

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithmId}`)
  }

  const hash = createHash(ID_TO_ALGORITHM[algorithmId])

  const wrapper: any = stream.pipe(
    through2(
      { highWaterMark: 0 },
      (chunk, enc, callback) => {
        hash.update(chunk)
        callback(null, chunk)
      },
      callback => {
        const checksum = `$${algorithmId}$$${hash.digest('hex')}`

        callback(
          checksum !== expectedChecksum
            ? new Error(
                `Bad checksum (${checksum}), expected: ${expectedChecksum}`
              )
            : null
        )
      }
    )
  )

  stream.on('error', error => wrapper.emit('error', error))
  wrapper.checksumVerified = fromEvent(wrapper, 'end')

  return wrapper
}
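A hedged roundtrip sketch of the two helpers above (file names invented): checksum while writing, verify while reading back.

```js
// Invented file names; createChecksumStream/validChecksumOfReadStream as above.
import { createReadStream, createWriteStream } from 'fs'
import { fromEvent } from 'promise-toolbox'

async function copyWithChecksum () {
  const source = createReadStream('data.bin').pipe(createChecksumStream())
  const out = createWriteStream('copy.bin')
  source.pipe(out)
  const checksum = await source.checksum // e.g. '$1$$<hex digest>'
  await fromEvent(out, 'finish') // ensure the copy is fully flushed

  const verified = validChecksumOfReadStream(
    createReadStream('copy.bin'),
    checksum
  )
  verified.resume() // consume the data
  await verified.checksumVerified // rejects if the checksums differ
}
```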
packages/xo-server/src/remote-handlers/index.js (new file, 25 lines)
@@ -0,0 +1,25 @@
// @flow

import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'

export type Remote = { url: string }

const HANDLERS = {
  file: RemoteHandlerLocal,
  smb: RemoteHandlerSmb,
  nfs: RemoteHandlerNfs,
}

export const getHandler = (remote: Remote): RemoteHandler => {
  // FIXME: should be done in xo-remote-parser.
  const type = remote.url.split('://')[0]

  const Handler = HANDLERS[type]
  if (!Handler) {
    throw new Error('Unhandled remote type')
  }
  return new Handler(remote)
}
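A usage sketch (remote URL invented): the URL scheme selects the handler class.

```js
// 'nfs://' picks RemoteHandlerNfs; 'file://' and 'smb://' work the same way.
const handler = getHandler({ url: 'nfs://192.0.2.10:/export/backups' })
await handler.sync()
const entries = await handler.list('.', { prependDir: true })
```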
@@ -63,13 +63,29 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _createReadStream (file, options) {
    return fs.createReadStream(this._getFilePath(file), options)
    if (typeof file === 'string') {
      return fs.createReadStream(this._getFilePath(file), options)
    } else {
      return fs.createReadStream('', {
        autoClose: false,
        ...options,
        fd: file.fd,
      })
    }
  }

  async _createOutputStream (file, options) {
    const path = this._getFilePath(file)
    await fs.ensureDir(dirname(path))
    return fs.createWriteStream(path, options)
    if (typeof file === 'string') {
      const path = this._getFilePath(file)
      await fs.ensureDir(dirname(path))
      return fs.createWriteStream(path, options)
    } else {
      return fs.createWriteStream('', {
        autoClose: false,
        ...options,
        fd: file.fd,
      })
    }
  }

  async _unlink (file) {
@@ -82,7 +98,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _getSize (file) {
    const stats = await fs.stat(this._getFilePath(file))
    const stats = await fs.stat(
      this._getFilePath(typeof file === 'string' ? file : file.path)
    )
    return stats.size
  }

  async _openFile (path, flags) {
    return fs.open(this._getFilePath(path), flags)
  }

  async _closeFile (fd) {
    return fs.close(fd)
  }
}

@@ -139,6 +139,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  async _createReadStream (file, options = {}) {
    if (typeof file !== 'string') {
      file = file.path
    }
    const client = this._getClient(this._remote)
    let stream

@@ -154,6 +157,9 @@ export default class SmbHandler extends RemoteHandlerAbstract {
  }

  async _createOutputStream (file, options = {}) {
    if (typeof file !== 'string') {
      file = file.path
    }
    const client = this._getClient(this._remote)
    const path = this._getFilePath(file)
    const dir = this._dirname(path)
@@ -188,13 +194,22 @@ export default class SmbHandler extends RemoteHandlerAbstract {
    let size

    try {
      size = await client.getSize(this._getFilePath(file))::pFinally(() => {
        client.close()
      })
      size = await client
        .getSize(this._getFilePath(typeof file === 'string' ? file : file.path))
        ::pFinally(() => {
          client.close()
        })
    } catch (error) {
      throw normalizeError(error)
    }

    return size
  }

  // this is a fake file descriptor: SMB handlers simply reuse the path
  async _openFile (path) {
    return this._getFilePath(path)
  }

  async _closeFile (fd) {}
}

@@ -1,6 +1,10 @@
import through2 from 'through2'
// @flow

const createSizeStream = () => {
// $FlowFixMe
import through2 from 'through2'
import { type Readable } from 'stream'

const createSizeStream = (): Readable & { size: number } => {
  const wrapper = through2((chunk, enc, cb) => {
    wrapper.size += chunk.length
    cb(null, chunk)

@@ -1,10 +1,8 @@
import base64url from 'base64url'
import eventToPromise from 'event-to-promise'
import forEach from 'lodash/forEach'
import has from 'lodash/has'
import highland from 'highland'
import humanFormat from 'human-format'
import invert from 'lodash/invert'
import isArray from 'lodash/isArray'
import isString from 'lodash/isString'
import keys from 'lodash/keys'
@@ -14,24 +12,16 @@ import multiKeyHashInt from 'multikey-hash'
import pick from 'lodash/pick'
import tmp from 'tmp'
import xml2js from 'xml2js'
import { resolve } from 'path'

// Moment timezone can be loaded only one time; this is a workaround to load
// the latest version, because the cron module uses an old version of moment
// which does not implement the `guess` function, for example.
import 'moment-timezone'

import through2 from 'through2'
import { randomBytes } from 'crypto'
import { dirname, resolve } from 'path'
import { utcFormat, utcParse } from 'd3-time-format'
import {
  all as pAll,
  defer,
  fromCallback,
  isPromise,
  promisify,
  reflect as pReflect,
} from 'promise-toolbox'
import { createHash, randomBytes } from 'crypto'

// ===================================================================

@@ -78,16 +68,9 @@ export function camelToSnakeCase (string) {

// -------------------------------------------------------------------

// Returns an empty object without prototype (if possible).
export const createRawObject = Object.create
  ? (createObject => () => createObject(null))(Object.create)
  : () => ({})

// -------------------------------------------------------------------

// Only works with string items!
export const diffItems = (coll1, coll2) => {
  const removed = createRawObject()
  const removed = { __proto__: null }
  forEach(coll2, value => {
    removed[value] = true
  })
@@ -106,99 +89,6 @@ export const diffItems = (coll1, coll2) => {

// -------------------------------------------------------------------

const ALGORITHM_TO_ID = {
  md5: '1',
  sha256: '5',
  sha512: '6',
}

const ID_TO_ALGORITHM = invert(ALGORITHM_TO_ID)

// Wrap a readable stream in a stream with a checksum promise
// attribute which is resolved at the end of the input stream.
// (Finally .checksum contains the checksum of the input stream)
//
// Example:
//    const sourceStream = ...
//    const targetStream = ...
//    const checksumStream = addChecksumToReadStream(sourceStream)
//    await Promise.all([
//      eventToPromise(checksumStream.pipe(targetStream), 'finish'),
//      checksumStream.checksum.then(console.log)
//    ])
export const addChecksumToReadStream = (stream, algorithm = 'md5') => {
  const algorithmId = ALGORITHM_TO_ID[algorithm]

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithm}`)
  }

  const hash = createHash(algorithm)
  const { promise, resolve } = defer()

  const wrapper = stream.pipe(
    through2(
      (chunk, enc, callback) => {
        hash.update(chunk)
        callback(null, chunk)
      },
      callback => {
        resolve(hash.digest('hex'))
        callback()
      }
    )
  )

  stream.on('error', error => wrapper.emit('error', error))
  wrapper.checksum = promise.then(hash => `$${algorithmId}$$${hash}`)

  return wrapper
}

// Check if the checksum of a readable stream equals an expected checksum.
// The given stream is wrapped in a stream which emits an error event
// if the computed checksum is not equal to the expected checksum.
export const validChecksumOfReadStream = (stream, expectedChecksum) => {
  const algorithmId = expectedChecksum.slice(
    1,
    expectedChecksum.indexOf('$', 1)
  )

  if (!algorithmId) {
    throw new Error(`unknown algorithm: ${algorithmId}`)
  }

  const hash = createHash(ID_TO_ALGORITHM[algorithmId])

  const wrapper = stream.pipe(
    through2(
      { highWaterMark: 0 },
      (chunk, enc, callback) => {
        hash.update(chunk)
        callback(null, chunk)
      },
      callback => {
        const checksum = `$${algorithmId}$$${hash.digest('hex')}`

        callback(
          checksum !== expectedChecksum
            ? new Error(
                `Bad checksum (${checksum}), expected: ${expectedChecksum}`
              )
            : null
        )
      }
    )
  )

  stream.on('error', error => wrapper.emit('error', error))
  wrapper.checksumVerified = eventToPromise(wrapper, 'end')

  return wrapper
}

// -------------------------------------------------------------------

// Ensure the value is an array, wrap it if necessary.
export function ensureArray (value) {
  if (value === undefined) {
@@ -307,7 +197,7 @@ export const parseXml = (function () {
//   - works only with strings
//   - methods are already bound and chainable
export const lightSet = collection => {
  let data = createRawObject()
  let data = { __proto__: null }
  if (collection) {
    forEach(collection, value => {
      data[value] = true
@@ -321,7 +211,7 @@ export const lightSet = collection => {
      return set
    },
    clear: () => {
      data = createRawObject()
      data = { __proto__: null }
      return set
    },
    delete: value => {
@@ -429,6 +319,12 @@ export const popProperty = obj => {

// -------------------------------------------------------------------

// resolve a relative path from a file
export const resolveRelativeFromFile = (file, path) =>
  resolve('/', dirname(file), path).slice(1)

// -------------------------------------------------------------------

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const safeDateFormat = utcFormat('%Y%m%dT%H%M%SZ')

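Two quick examples for the helpers above (values invented):

```js
// Resolve 'path' against the directory of 'file', staying relative.
resolveRelativeFromFile('backups/vm1/metadata.json', '../vm2/data.vhd')
// → 'backups/vm2/data.vhd'

// Windows-safe ISO 8601 timestamp for file names.
safeDateFormat(new Date(Date.UTC(2018, 0, 31, 14, 5, 0)))
// → '20180131T140500Z'
```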
packages/xo-server/src/utils.js.flow (new file, 24 lines)
@@ -0,0 +1,24 @@
// @flow

import { type Readable } from 'stream'

type MaybePromise<T> = Promise<T> | T

declare export function asyncMap<T1, T2>(
  collection: MaybePromise<T1[]>,
  (T1, number) => MaybePromise<T2>
): Promise<T2[]>
declare export function asyncMap<K, V1, V2>(
  collection: MaybePromise<{ [K]: V1 }>,
  (V1, K) => MaybePromise<V2>
): Promise<V2[]>

declare export function getPseudoRandomBytes(n: number): Buffer

declare export function resolveRelativeFromFile(file: string, path: string): string

declare export function safeDateFormat(timestamp: number): string

declare export function serializeError(error: Error): Object

declare export function streamToBuffer(stream: Readable): Promise<Buffer>
@@ -2,7 +2,6 @@

import {
  camelToSnakeCase,
  createRawObject,
  diffItems,
  ensureArray,
  extractProperty,
@@ -32,24 +31,6 @@ describe('camelToSnakeCase()', function () {

// -------------------------------------------------------------------

describe('createRawObject()', () => {
  it('returns an empty object', () => {
    expect(createRawObject()).toEqual({})
  })

  it('creates a new object each time', () => {
    expect(createRawObject()).not.toBe(createRawObject())
  })

  if (Object.getPrototypeOf) {
    it('creates an object without a prototype', () => {
      expect(Object.getPrototypeOf(createRawObject())).toBe(null)
    })
  }
})

// -------------------------------------------------------------------

describe('diffItems', () => {
  it('computes the added/removed items between 2 iterables', () => {
    expect(diffItems(['foo', 'bar'], ['baz', 'foo'])).toEqual([

284
packages/xo-server/src/vhd-merge.integ.spec.js
Normal file
284
packages/xo-server/src/vhd-merge.integ.spec.js
Normal file
@@ -0,0 +1,284 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import execa from 'execa'
|
||||
import fs from 'fs-extra'
|
||||
import rimraf from 'rimraf'
|
||||
import { randomBytes } from 'crypto'
|
||||
import { fromEvent } from 'promise-toolbox'
|
||||
|
||||
import LocalHandler from './remote-handlers/local'
|
||||
import vhdMerge, {
|
||||
chainVhd,
|
||||
createReadStream,
|
||||
Vhd,
|
||||
VHD_SECTOR_SIZE,
|
||||
} from './vhd-merge'
|
||||
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
|
||||
|
||||
const initialDir = process.cwd()
|
||||
|
||||
jest.setTimeout(10000)
|
||||
|
||||
beforeEach(async () => {
|
||||
const dir = await tmpDir()
|
||||
process.chdir(dir)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
const tmpDir = process.cwd()
|
||||
process.chdir(initialDir)
|
||||
await pFromCallback(cb => rimraf(tmpDir, cb))
|
||||
})
|
||||
|
||||
async function createRandomFile (name, sizeMb) {
|
||||
await execa('bash', [
|
||||
'-c',
|
||||
`< /dev/urandom tr -dc "\\t\\n [:alnum:]" | head -c ${sizeMb}M >${name}`,
|
||||
])
|
||||
}
|
||||
|
||||
async function checkFile (vhdName) {
|
||||
await execa('vhd-util', ['check', '-p', '-b', '-t', '-n', vhdName])
|
||||
}
|
||||
|
||||
async function recoverRawContent (vhdName, rawName, originalSize) {
|
||||
await checkFile(vhdName)
|
||||
await execa('qemu-img', ['convert', '-fvpc', '-Oraw', vhdName, rawName])
|
||||
if (originalSize !== undefined) {
|
||||
await execa('truncate', ['-s', originalSize, rawName])
|
||||
}
|
||||
}
|
||||
|
||||
async function convertFromRawToVhd (rawName, vhdName) {
|
||||
await execa('qemu-img', ['convert', '-f', 'raw', '-Ovpc', rawName, vhdName])
|
||||
}
|
||||
|
||||
test('blocks can be moved', async () => {
|
||||
const initalSize = 4
|
||||
await createRandomFile('randomfile', initalSize)
|
||||
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'randomfile.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd._freeFirstBlockSpace(8000000)
|
||||
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(
|
||||
await fs.readFile('randomfile')
|
||||
)
|
||||
})
|
||||
|
||||
test('the BAT MSB is not used for sign', async () => {
|
||||
const randomBuffer = await pFromCallback(cb =>
|
||||
randomBytes(VHD_SECTOR_SIZE, cb)
|
||||
)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const vhd = new Vhd(handler, 'empty.vhd')
|
||||
await vhd.readHeaderAndFooter()
|
||||
await vhd.readBlockTable()
|
||||
// we want the bit 31 to be on, to prove it's not been used for sign
|
||||
const hugeWritePositionSectors = Math.pow(2, 31) + 200
|
||||
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
|
||||
await checkFile('empty.vhd')
|
||||
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
|
||||
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
|
||||
await vhd._freeFirstBlockSpace(hugePositionBytes)
|
||||
|
||||
// we recover the data manually for speed reasons.
|
||||
// fs.write() with offset is way faster than qemu-img when there is a 1.5To
|
||||
// hole before the block of data
|
||||
const recoveredFile = await fs.open('recovered', 'w')
|
||||
try {
|
||||
const vhd2 = new Vhd(handler, 'empty.vhd')
|
||||
await vhd2.readHeaderAndFooter()
|
||||
await vhd2.readBlockTable()
|
||||
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
|
||||
const entry = vhd._getBatEntry(i)
|
||||
if (entry !== 0xffffffff) {
|
||||
const block = (await vhd2._readBlock(i)).data
|
||||
await fs.write(
|
||||
recoveredFile,
|
||||
block,
|
||||
0,
|
||||
block.length,
|
||||
vhd2.header.blockSize * i
|
||||
)
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
fs.close(recoveredFile)
|
||||
}
|
||||
const recovered = await streamToBuffer(
|
||||
await fs.createReadStream('recovered', {
|
||||
start: hugePositionBytes,
|
||||
end: hugePositionBytes + randomBuffer.length - 1,
|
||||
})
|
||||
)
|
||||
expect(recovered).toEqual(randomBuffer)
|
||||
})
|
||||
|
||||
test('writeData on empty file', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
await newVhd.writeData(0, randomData)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('writeData in 2 non-overlaping operations', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
const splitPointSectors = 2
|
||||
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
|
||||
await newVhd.writeData(
|
||||
splitPointSectors,
|
||||
randomData.slice(splitPointSectors * 512)
|
||||
)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('writeData in 2 overlaping operations', async () => {
|
||||
const mbOfRandom = 3
|
||||
await createRandomFile('randomfile', mbOfRandom)
|
||||
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
|
||||
const randomData = await fs.readFile('randomfile')
|
||||
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
|
||||
const originalSize = await handler.getSize('randomfile')
|
||||
const newVhd = new Vhd(handler, 'empty.vhd')
|
||||
await newVhd.readHeaderAndFooter()
|
||||
await newVhd.readBlockTable()
|
||||
const endFirstWrite = 3
|
||||
const startSecondWrite = 2
|
||||
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
|
||||
await newVhd.writeData(
|
||||
startSecondWrite,
|
||||
randomData.slice(startSecondWrite * 512)
|
||||
)
|
||||
await recoverRawContent('empty.vhd', 'recovered', originalSize)
|
||||
expect(await fs.readFile('recovered')).toEqual(randomData)
|
||||
})
|
||||
|
||||
test('BAT can be extended and blocks moved', async () => {
  const initialSize = 4
  await createRandomFile('randomfile', initialSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler.getSize('randomfile')
  const newVhd = new Vhd(handler, 'randomfile.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.ensureBatSize(2000)
  await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works with empty parent files', async () => {
  const mbOfRandom = 2
  await createRandomFile('randomfile', mbOfRandom)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'empty.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
  await checkFile('randomfile.vhd')
  await checkFile('empty.vhd')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  await recoverRawContent('empty.vhd', 'recovered', originalSize)
  expect(await fs.readFile('recovered')).toEqual(
    await fs.readFile('randomfile')
  )
})

test('coalesce works in normal cases', async () => {
  const mbOfRandom = 5
  await createRandomFile('randomfile', mbOfRandom)
  await createRandomFile('small_randomfile', Math.ceil(mbOfRandom / 2))
  await execa('qemu-img', [
    'create',
    '-fvpc',
    'parent.vhd',
    mbOfRandom + 1 + 'M',
  ])
  await convertFromRawToVhd('randomfile', 'child1.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
  const vhd = new Vhd(handler, 'child2.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  vhd.footer.creatorApplication = 'xoa'
  await vhd.writeFooter()

  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'parent.vhd', handler, 'child1.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child1.vhd'])
  await chainVhd(handler, 'child1.vhd', handler, 'child2.vhd', true)
  await execa('vhd-util', ['check', '-t', '-n', 'child2.vhd'])
  const smallRandom = await fs.readFile('small_randomfile')
  const newVhd = new Vhd(handler, 'child2.vhd')
  await newVhd.readHeaderAndFooter()
  await newVhd.readBlockTable()
  await newVhd.writeData(5, smallRandom)
  await checkFile('child2.vhd')
  await checkFile('child1.vhd')
  await checkFile('parent.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child1.vhd')
  await checkFile('parent.vhd')
  await chainVhd(handler, 'parent.vhd', handler, 'child2.vhd', true)
  await checkFile('child2.vhd')
  await vhdMerge(handler, 'parent.vhd', handler, 'child2.vhd')
  await checkFile('parent.vhd')
  await recoverRawContent(
    'parent.vhd',
    'recovered_from_coalescing',
    originalSize
  )
  await execa('cp', ['randomfile', 'randomfile2'])
  const fd = await fs.open('randomfile2', 'r+')
  try {
    await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
  } finally {
    await fs.close(fd)
  }
  expect(await fs.readFile('recovered_from_coalescing')).toEqual(
    await fs.readFile('randomfile2')
  )
})

test('createReadStream passes vhd-util check', async () => {
  const initialSize = 4
  await createRandomFile('randomfile', initialSize)
  await convertFromRawToVhd('randomfile', 'randomfile.vhd')
  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const stream = createReadStream(handler, 'randomfile.vhd')
  await fromEvent(
    stream.pipe(await fs.createWriteStream('recovered.vhd')),
    'finish'
  )
  await checkFile('recovered.vhd')
})
@@ -1,16 +1,18 @@
// TODO: remove once completely merged in vhd.js

import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import concurrency from 'limit-concurrency-decorator'
import eventToPromise from 'event-to-promise'
import fu from '@nraynaud/struct-fu'
import isEqual from 'lodash/isEqual'
import fu from 'struct-fu'
import { dirname, relative } from 'path'
import { fromEvent } from 'promise-toolbox'

import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, streamToBuffer } from './utils'
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'

const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-util]${str}`) : noop
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop

// ===================================================================
//
@@ -25,7 +27,7 @@ const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-util]${str}`) : noop
// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
const VHD_SECTOR_SIZE = 512
export const VHD_SECTOR_SIZE = 512

// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4
@@ -34,8 +36,14 @@ const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0

// Types of backup treated. Others are not supported.
const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.

export const PLATFORM_NONE = 0
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358

// Other.
const BLOCK_UNUSED = 0xffffffff
@@ -47,28 +55,24 @@ BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)

// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
  fu.uint32(2),
  number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
  _ => _[0] * SIZE_OF_32_BITS + _[1]
)
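// Illustration (editor's sketch, not part of the diff): how the uint64 codec
// above round-trips a JS number through two big-endian uint32 halves.
// Assumes the `fu`, `uint64` and `SIZE_OF_32_BITS` definitions from this file.
const uint64Example = fu.struct([uint64('value')])
const packedExample = uint64Example.pack({ value: 5 * SIZE_OF_32_BITS + 42 })
// 8 bytes: the high 32 bits (5) followed by the low 32 bits (42)
console.log(uint64Example.unpack(packedExample).value === 5 * SIZE_OF_32_BITS + 42) // true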

const fuFooter = fu.struct([
  fu.char('cookie', 8), // 0
  fu.uint32('features'), // 8
  fu.uint32('fileFormatVersion'), // 12
  fu.struct('dataOffset', [
    fu.uint32('high'), // 16
    fu.uint32('low'), // 20
  ]),
  uint64('dataOffset'), // offset of the header, should always be 512
  fu.uint32('timestamp'), // 24
  fu.char('creatorApplication', 4), // 28
  fu.uint32('creatorVersion'), // 32
  fu.uint32('creatorHostOs'), // 36
  fu.struct('originalSize', [
    // At the creation, current size of the hard disk.
    fu.uint32('high'), // 40
    fu.uint32('low'), // 44
  ]),
  fu.struct('currentSize', [
    // Current size of the virtual disk. At the creation: currentSize = originalSize.
    fu.uint32('high'), // 48
    fu.uint32('low'), // 52
  ]),
  uint64('originalSize'),
  uint64('currentSize'),
  fu.struct('diskGeometry', [
    fu.uint16('cylinders'), // 56
    fu.uint8('heads'), // 58
@@ -84,12 +88,8 @@ const fuFooter = fu.struct([

const fuHeader = fu.struct([
  fu.char('cookie', 8),
  fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
  fu.struct('tableOffset', [
    // Absolute byte offset of the Block Allocation Table.
    fu.uint32('high'),
    fu.uint32('low'),
  ]),
  fu.uint8('dataOffsetUnused', 8),
  uint64('tableOffset'),
  fu.uint32('headerVersion'),
  fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
  fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
@@ -105,11 +105,7 @@ const fuHeader = fu.struct([
      fu.uint32('platformDataSpace'),
      fu.uint32('platformDataLength'),
      fu.uint32('reserved'),
      fu.struct('platformDataOffset', [
        // Absolute byte offset of the locator data.
        fu.uint32('high'),
        fu.uint32('low'),
      ]),
      uint64('platformDataOffset'), // Absolute byte offset of the locator data.
    ],
    VHD_PARENT_LOCATOR_ENTRIES
  ),
@@ -120,16 +116,14 @@ const fuHeader = fu.struct([
// Helpers
// ===================================================================

const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
const computeBatSize = entries =>
  sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))

// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)

// Sectors conversions.
const sectorsRoundUp = bytes =>
  Math.floor((bytes + VHD_SECTOR_SIZE - 1) / VHD_SECTOR_SIZE)
const sectorsRoundUpNoZero = bytes => sectorsRoundUp(bytes) || 1
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
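// Illustration (editor's sketch, not part of the diff): the helpers above in
// action, using only definitions from this file.
console.log(sectorsRoundUpNoZero(0)) // 1 (never zero, hence the `|| 1`)
console.log(sectorsRoundUpNoZero(512)) // 1
console.log(sectorsRoundUpNoZero(513)) // 2
console.log(sectorsToBytes(2)) // 1024
console.log(getVhdVersion(1, 0) === 0x00010000) // true: major in the high 16 bits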

// Check/Set a bit on a vhd map.
@@ -160,29 +154,67 @@ const unpackField = (field, buf) => {

// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (rawStruct, struct) {
function checksumStruct (buf, struct) {
  const checksumField = struct.fields.checksum

  let sum = 0

  // Reset current sum.
  packField(checksumField, 0, rawStruct)

  for (let i = 0, n = struct.size; i < n; i++) {
    sum = (sum + rawStruct[i]) & 0xffffffff
  // Do not use the stored checksum to compute the new checksum.
  const checksumOffset = checksumField.offset
  for (let i = 0, n = checksumOffset; i < n; ++i) {
    sum += buf[i]
  }
  for (
    let i = checksumOffset + checksumField.size, n = struct.size;
    i < n;
    ++i
  ) {
    sum += buf[i]
  }

  sum = 0xffffffff - sum
  sum = ~sum >>> 0

  // Write new sum.
  packField(checksumField, sum, rawStruct)
  packField(checksumField, sum, buf)

  return sum
}

const assertChecksum = (name, buf, struct) => {
  const actual = unpackField(struct.fields.checksum, buf)
  const expected = checksumStruct(buf, struct)
  if (actual !== expected) {
    throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
  }
}
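// Editor's note (not part of the diff): the VHD checksum is the one's
// complement of the byte sum, so `~sum >>> 0` and `0xffffffff - sum` agree
// for any sum below 2 ** 32. A quick check with a plain number:
const byteSumExample = 0x1234
console.log((~byteSumExample >>> 0) === 0xffffffff - byteSumExample) // true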

// ===================================================================

class Vhd {
// Format:
//
// 1. Footer (512)
// 2. Header (1024)
// 3. Unordered entries
//    - BAT (batSize @ header.tableOffset)
//    - Blocks (@ blockOffset(i))
//      - bitmap (blockBitmapSize)
//      - data (header.blockSize)
//    - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
// 4. Footer (512 @ vhdSize - 512)
//
// Variables:
//
// - batSize = max(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
// - blockOffset(i) = bat[i] * sectorSize
// - nBlocks = ceil(footer.currentSize / header.blockSize)
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
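// Worked example (editor's note, not part of the diff), for the default
// 2 MiB blockSize and a 4 GiB disk (maxTableEntries = 2048):
//   batSize         = max(1, ceil(2048 * 4 / 512)) * 512 = 8192 bytes
//   blockBitmapSize = ceil(2097152 / 512 / 8 / 512) * 512 = 512 bytes (1 sector)
//   nBlocks         = ceil(4294967296 / 2097152) = 2048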
export class Vhd {
  get batSize () {
    return computeBatSize(this.header.maxTableEntries)
  }

  constructor (handler, path) {
    this._handler = handler
    this._path = path
@@ -203,21 +235,18 @@ class Vhd {
    return this._readStream(start, n).then(streamToBuffer)
  }

  containsBlock (id) {
    return this._getBatEntry(id) !== BLOCK_UNUSED
  }

  // Returns the first address after metadata. (In bytes)
  getEndOfHeaders () {
    const { header } = this

    let end = uint32ToUint64(this.footer.dataOffset) + VHD_HEADER_SIZE

    const blockAllocationTableSize = sectorsToBytes(
      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
    )
    let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE

    // Max(end, block allocation table end)
    end = Math.max(
      end,
      uint32ToUint64(header.tableOffset) + blockAllocationTableSize
    )
    end = Math.max(end, header.tableOffset + this.batSize)

    for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
      const entry = header.parentLocatorEntry[i]
@@ -225,8 +254,7 @@ class Vhd {
      if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
        end = Math.max(
          end,
          uint32ToUint64(entry.platformDataOffset) +
            sectorsToBytes(entry.platformDataSpace)
          entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
        )
      }
    }
@@ -258,21 +286,16 @@ class Vhd {
  // Get the beginning (footer + header) of a vhd file.
  async readHeaderAndFooter () {
    const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
    const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
    const bufHeader = buf.slice(VHD_FOOTER_SIZE)

    const sum = unpackField(fuFooter.fields.checksum, buf)
    const sumToTest = checksumStruct(buf, fuFooter)
    assertChecksum('footer', bufFooter, fuFooter)
    assertChecksum('header', bufHeader, fuHeader)

    // Checksum child & parent.
    if (sumToTest !== sum) {
      throw new Error(
        `Bad checksum in vhd. Expected: ${sum}. Given: ${sumToTest}. (data=${buf.toString(
          'hex'
        )})`
      )
    }
    const footer = (this.footer = fuFooter.unpack(bufFooter))
    assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)

    const header = (this.header = fuHeader.unpack(buf.slice(VHD_FOOTER_SIZE)))
    this.footer = fuFooter.unpack(buf)
    const header = (this.header = fuHeader.unpack(bufHeader))

    // Compute the number of sectors in one block.
    // Default: One block contains 4096 sectors of 512 bytes.
@@ -302,13 +325,10 @@ class Vhd {
  // Returns a buffer that contains the block allocation table of a vhd file.
  async readBlockTable () {
    const { header } = this

    const offset = uint32ToUint64(header.tableOffset)
    const size = sectorsToBytes(
      sectorsRoundUpNoZero(header.maxTableEntries * VHD_ENTRY_SIZE)
    this.blockTable = await this._read(
      header.tableOffset,
      header.maxTableEntries * VHD_ENTRY_SIZE
    )

    this.blockTable = await this._read(offset, size)
  }

  // return the first sector (bitmap) of a block
@@ -328,18 +348,19 @@ class Vhd {
    ).then(
      buf =>
        onlyBitmap
          ? { bitmap: buf }
          ? { id: blockId, bitmap: buf }
          : {
              bitmap: buf.slice(0, this.bitmapSize),
              data: buf.slice(this.bitmapSize),
            }
              id: blockId,
              bitmap: buf.slice(0, this.bitmapSize),
              data: buf.slice(this.bitmapSize),
              buffer: buf,
            }
    )
  }

  // get the identifiers and first sectors of the first and last block
  // in the file
  //
  // return undefined if none
  _getFirstAndLastBlocks () {
    const n = this.header.maxTableEntries
    const bat = this.blockTable
@@ -353,7 +374,9 @@ class Vhd {
      j += VHD_ENTRY_SIZE

      if (i === n) {
        throw new Error('no allocated block found')
        const error = new Error('no allocated block found')
        error.noBlock = true
        throw error
      }
    }
    lastSector = firstSector
@@ -383,80 +406,87 @@ class Vhd {
  // =================================================================

  // Write a buffer/stream at a given position in a vhd file.
  _write (data, offset) {
  async _write (data, offset) {
    debug(
      `_write offset=${offset} size=${
        Buffer.isBuffer(data) ? data.length : '???'
      }`
    )
    // TODO: could probably be merged in remote handlers.
    return this._handler
      .createOutputStream(this._path, {
        flags: 'r+',
        start: offset,
      })
      .then(
        Buffer.isBuffer(data)
          ? stream =>
              new Promise((resolve, reject) => {
                stream.on('error', reject)
                stream.end(data, resolve)
              })
          : stream => eventToPromise(data.pipe(stream), 'finish')
      )
    const stream = await this._handler.createOutputStream(this._path, {
      flags: 'r+',
      start: offset,
    })
    return Buffer.isBuffer(data)
      ? new Promise((resolve, reject) => {
          stream.on('error', reject)
          stream.end(data, resolve)
        })
      : fromEvent(data.pipe(stream), 'finish')
  }

  async ensureBatSize (size) {
    const { header } = this
  async _freeFirstBlockSpace (spaceNeededBytes) {
    try {
      const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
      const tableOffset = this.header.tableOffset
      const { batSize } = this
      const newMinSector = Math.ceil(
        (tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
      )
      if (
        tableOffset + batSize + spaceNeededBytes >=
        sectorsToBytes(firstSector)
      ) {
        const { fullBlockSize } = this
        const newFirstSector = Math.max(
          lastSector + fullBlockSize / VHD_SECTOR_SIZE,
          newMinSector
        )
        debug(
          `freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
        )
        // copy the first block at the end
        const stream = await this._readStream(
          sectorsToBytes(firstSector),
          fullBlockSize
        )
        await this._write(stream, sectorsToBytes(newFirstSector))
        await this._setBatEntry(first, newFirstSector)
        await this.writeFooter(true)
        spaceNeededBytes -= this.fullBlockSize
        if (spaceNeededBytes > 0) {
          return this._freeFirstBlockSpace(spaceNeededBytes)
        }
      }
    } catch (e) {
      if (!e.noBlock) {
        throw e
      }
    }
  }

  async ensureBatSize (entries) {
    const { header } = this
    const prevMaxTableEntries = header.maxTableEntries
    if (prevMaxTableEntries >= size) {
    if (prevMaxTableEntries >= entries) {
      return
    }

    const tableOffset = uint32ToUint64(header.tableOffset)
    const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()

    // extend BAT
    const maxTableEntries = (header.maxTableEntries = size)
    const batSize = maxTableEntries * VHD_ENTRY_SIZE
    const newBatSize = computeBatSize(entries)
    await this._freeFirstBlockSpace(newBatSize - this.batSize)
    const maxTableEntries = (header.maxTableEntries = entries)
    const prevBat = this.blockTable
    const bat = (this.blockTable = Buffer.allocUnsafe(batSize))
    const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
    prevBat.copy(bat)
    bat.fill(BUF_BLOCK_UNUSED, prevBat.length)
    bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
    debug(
      `ensureBatSize: extend in memory BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
      `ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
    )

    const extendBat = () => {
      debug(
        `ensureBatSize: extend in file BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
      )

      return this._write(
        constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
        tableOffset + prevBat.length
      )
    }

    if (tableOffset + batSize < sectorsToBytes(firstSector)) {
      return Promise.all([extendBat(), this.writeHeader()])
    }

    const { fullBlockSize } = this
    const newFirstSector = lastSector + fullBlockSize / VHD_SECTOR_SIZE
    debug(`ensureBatSize: move first block ${firstSector} -> ${newFirstSector}`)

    return Promise.all([
      // copy the first block at the end
      this._readStream(sectorsToBytes(firstSector), fullBlockSize)
        .then(stream => this._write(stream, sectorsToBytes(newFirstSector)))
        .then(extendBat),

      this._setBatEntry(first, newFirstSector),
      this.writeHeader(),
      this.writeFooter(),
    ])
    await this._write(
      constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
      header.tableOffset + prevBat.length
    )
    await this.writeHeader()
  }

  // set the first sector (bitmap) of a block
@@ -468,7 +498,7 @@ class Vhd {

    return this._write(
      blockTable.slice(i, i + VHD_ENTRY_SIZE),
      uint32ToUint64(this.header.tableOffset) + i
      this.header.tableOffset + i
    )
  }

@@ -510,12 +540,24 @@ class Vhd {
    await this._write(bitmap, sectorsToBytes(blockAddr))
  }

  async writeBlockSectors (block, beginSectorId, endSectorId) {
  async writeEntireBlock (block) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
    }
    await this._write(block.buffer, sectorsToBytes(blockAddr))
  }

  async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
    let blockAddr = this._getBatEntry(block.id)

    if (blockAddr === BLOCK_UNUSED) {
      blockAddr = await this.createBlock(block.id)
      parentBitmap = Buffer.alloc(this.bitmapSize, 0)
    } else if (parentBitmap === undefined) {
      parentBitmap = (await this._readBlock(block.id, true)).bitmap
    }

    const offset = blockAddr + this.sectorsOfBitmap + beginSectorId

@@ -525,6 +567,11 @@ class Vhd {
      }, sectors=${beginSectorId}...${endSectorId}`
    )

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(parentBitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, parentBitmap)
    await this._write(
      block.data.slice(
        sectorsToBytes(beginSectorId),
@@ -532,20 +579,11 @@ class Vhd {
      ),
      sectorsToBytes(offset)
    )

    const { bitmap } = await this._readBlock(block.id, true)

    for (let i = beginSectorId; i < endSectorId; ++i) {
      mapSetBit(bitmap, i)
    }

    await this.writeBlockBitmap(blockAddr, bitmap)
  }

  // Merge block id (of vhd child) into vhd parent.
  async coalesceBlock (child, blockId) {
    // Get block data and bitmap of block id.
    const { bitmap, data } = await child._readBlock(blockId)
    const block = await child._readBlock(blockId)
    const { bitmap, data } = block

    debug(`coalesceBlock block=${blockId}`)

@@ -556,7 +594,7 @@ class Vhd {
      if (!mapTestBit(bitmap, i)) {
        continue
      }

      let parentBitmap = null
      let endSector = i + 1

      // Count changed sectors.
@@ -566,7 +604,16 @@ class Vhd {

      // Write n sectors into parent.
      debug(`coalesceBlock: write sectors=${i}...${endSector}`)
      await this.writeBlockSectors({ id: blockId, data }, i, endSector)

      const isFullBlock = i === 0 && endSector === sectorsPerBlock
      if (isFullBlock) {
        await this.writeEntireBlock(block)
      } else {
        if (parentBitmap === null) {
          parentBitmap = (await this._readBlock(blockId, true)).bitmap
        }
        await this.writeBlockSectors(block, i, endSector, parentBitmap)
      }

      i = endSector
    }
@@ -576,11 +623,13 @@ class Vhd {
  }

  // Write a context footer. (At the end and beginning of a vhd file.)
  async writeFooter () {
  async writeFooter (onlyEndFooter = false) {
    const { footer } = this

    const offset = this.getEndOfData()
    const rawFooter = fuFooter.pack(footer)
    const eof = await this._handler.getSize(this._path)
    // sometimes the file is longer than anticipated, we still need to put the footer at the end
    const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)

    footer.checksum = checksumStruct(rawFooter, fuFooter)
    debug(
@@ -588,8 +637,9 @@ class Vhd {
        footer.checksum
      }). (data=${rawFooter.toString('hex')})`
    )

    await this._write(rawFooter, 0)
    if (!onlyEndFooter) {
      await this._write(rawFooter, 0)
    }
    await this._write(rawFooter, offset)
  }

@@ -605,6 +655,73 @@ class Vhd {
    )
    return this._write(rawHeader, offset)
  }

  async writeData (offsetSectors, buffer) {
    const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
    const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
    const endBufferSectors = offsetSectors + bufferSizeSectors
    const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
    await this.ensureBatSize(lastBlock)
    const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
    const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
      offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock

    for (
      let currentBlock = startBlock;
      currentBlock <= lastBlock;
      currentBlock++
    ) {
      const offsetInBlockSectors = Math.max(
        0,
        offsetSectors - currentBlock * this.sectorsPerBlock
      )
      const endInBlockSectors = Math.min(
        endBufferSectors - currentBlock * this.sectorsPerBlock,
        this.sectorsPerBlock
      )
      const startInBuffer = Math.max(
        0,
        (currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
      )
      const endInBuffer = Math.min(
        ((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
          VHD_SECTOR_SIZE,
        buffer.length
      )
      let inputBuffer
      if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
        inputBuffer = buffer.slice(startInBuffer, endInBuffer)
      } else {
        inputBuffer = Buffer.alloc(blockSizeBytes, 0)
        buffer.copy(
          inputBuffer,
          offsetInBlockSectors * VHD_SECTOR_SIZE,
          startInBuffer,
          endInBuffer
        )
      }
      await this.writeBlockSectors(
        { id: currentBlock, data: inputBuffer },
        offsetInBlockSectors,
        endInBlockSectors
      )
    }
    await this.writeFooter()
  }

  async ensureSpaceForParentLocators (neededSectors) {
    const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
    const currentSpace =
      Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
      firstLocatorOffset / VHD_SECTOR_SIZE
    if (currentSpace < neededSectors) {
      const deltaSectors = neededSectors - currentSpace
      await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
      this.header.tableOffset += sectorsToBytes(deltaSectors)
      await this._write(this.blockTable, this.header.tableOffset)
    }
    return firstLocatorOffset
  }
}
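// Illustration (editor's sketch, not part of the diff): the block/sector
// arithmetic used by writeData above, for a 2 MiB block (4096 sectors per
// block) and a 3-sector write starting at sector 4094, which straddles
// blocks 0 and 1.
const exSectorsPerBlock = 4096
const exOffsetSectors = 4094
const exBufferSizeSectors = 3
const exStartBlock = Math.floor(exOffsetSectors / exSectorsPerBlock) // 0
const exEndBufferSectors = exOffsetSectors + exBufferSizeSectors // 4097
const exLastBlock = Math.ceil(exEndBufferSectors / exSectorsPerBlock) - 1 // 1
// block 0 receives sectors 4094..4095, block 1 receives sector 0
console.log({ exStartBlock, exLastBlock })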

// Merge vhd child into vhd parent.
@@ -619,61 +736,70 @@ export default concurrency(2)(async function vhdMerge (
  childHandler,
  childPath
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)
  const parentFd = await parentHandler.openFile(parentPath, 'r+')
  try {
    const parentVhd = new Vhd(parentHandler, parentFd)
    const childFd = await childHandler.openFile(childPath, 'r')
    try {
      const childVhd = new Vhd(childHandler, childFd)

  // Reading footer and header.
  await Promise.all([
    parentVhd.readHeaderAndFooter(),
    childVhd.readHeaderAndFooter(),
  ])
      // Reading footer and header.
      await Promise.all([
        parentVhd.readHeaderAndFooter(),
        childVhd.readHeaderAndFooter(),
      ])

  assert(childVhd.header.blockSize === parentVhd.header.blockSize)
      assert(childVhd.header.blockSize === parentVhd.header.blockSize)

  // Child must be a delta.
  if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
    throw new Error('Unable to merge, child is not a delta backup.')
  }
      // Child must be a delta.
      if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
        throw new Error('Unable to merge, child is not a delta backup.')
      }

  // Merging in differencing disk is prohibited in our case.
  if (parentVhd.footer.diskType !== HARD_DISK_TYPE_DYNAMIC) {
    throw new Error('Unable to merge, parent is not a full backup.')
  }
      // Allocation table map is not yet implemented.
      if (
        parentVhd.hasBlockAllocationTableMap() ||
        childVhd.hasBlockAllocationTableMap()
      ) {
        throw new Error('Unsupported allocation table map.')
      }

  // Allocation table map is not yet implemented.
  if (
    parentVhd.hasBlockAllocationTableMap() ||
    childVhd.hasBlockAllocationTableMap()
  ) {
    throw new Error('Unsupported allocation table map.')
  }
      // Read allocation table of child/parent.
      await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])

  // Read allocation table of child/parent.
  await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])
      await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)

  await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
      let mergedDataSize = 0
      for (
        let blockId = 0;
        blockId < childVhd.header.maxTableEntries;
        blockId++
      ) {
        if (childVhd.containsBlock(blockId)) {
          mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
        }
      }

  let mergedDataSize = 0
      const cFooter = childVhd.footer
      const pFooter = parentVhd.footer

  for (let blockId = 0; blockId < childVhd.header.maxTableEntries; blockId++) {
    if (childVhd._getBatEntry(blockId) !== BLOCK_UNUSED) {
      mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
      pFooter.currentSize = cFooter.currentSize
      pFooter.diskGeometry = { ...cFooter.diskGeometry }
      pFooter.originalSize = cFooter.originalSize
      pFooter.timestamp = cFooter.timestamp
      pFooter.uuid = cFooter.uuid

      // necessary to update values and to recreate the footer after block
      // creation
      await parentVhd.writeFooter()

      return mergedDataSize
    } finally {
      await childHandler.closeFile(childFd)
    }
  } finally {
    await parentHandler.closeFile(parentFd)
  }

  const cFooter = childVhd.footer
  const pFooter = parentVhd.footer

  pFooter.currentSize = { ...cFooter.currentSize }
  pFooter.diskGeometry = { ...cFooter.diskGeometry }
  pFooter.originalSize = { ...cFooter.originalSize }
  pFooter.timestamp = cFooter.timestamp

  // necessary to update values and to recreate the footer after block
  // creation
  await parentVhd.writeFooter()

  return mergedDataSize
})

// returns true if the child was actually modified
@@ -681,42 +807,189 @@ export async function chainVhd (
  parentHandler,
  parentPath,
  childHandler,
  childPath
  childPath,
  force = false
) {
  const parentVhd = new Vhd(parentHandler, parentPath)
  const childVhd = new Vhd(childHandler, childPath)

  await childVhd.readHeaderAndFooter()
  const { header, footer } = childVhd

  if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
    if (!force) {
      throw new Error('cannot chain disk of type ' + footer.diskType)
    }
    footer.diskType = HARD_DISK_TYPE_DIFFERENCING
  }

  await Promise.all([
    childVhd.readBlockTable(),
    parentVhd.readHeaderAndFooter(),
    childVhd.readHeaderAndFooter(),
  ])

  const { header } = childVhd
  const parentName = relative(dirname(childPath), parentPath)

  const parentName = parentPath.split('/').pop()
  const parentUuid = parentVhd.footer.uuid
  if (
    header.parentUnicodeName !== parentName ||
    !isEqual(header.parentUuid, parentUuid)
  ) {
    header.parentUuid = parentUuid
    header.parentUnicodeName = parentName
    await childVhd.writeHeader()
    return true
  header.parentUuid = parentVhd.footer.uuid
  header.parentUnicodeName = parentName

  header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
  const encodedFilename = Buffer.from(parentName, 'utf16le')
  const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
  const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
  await childVhd._write(encodedFilename, position)
  header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
    dataSpaceSectors
  )
  header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
  header.parentLocatorEntry[0].platformDataOffset = position
  for (let i = 1; i < 8; i++) {
    header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
    header.parentLocatorEntry[i].platformDataSpace = 0
    header.parentLocatorEntry[i].platformDataLength = 0
    header.parentLocatorEntry[i].platformDataOffset = 0
  }
  await childVhd.writeHeader()
  await childVhd.writeFooter()
  return true
}

// The checksum was broken between xo-server v5.2.4 and v5.2.5
//
// Replace by a correct checksum if necessary.
//
// TODO: remove when enough time has passed (6 months).
{
  const rawHeader = fuHeader.pack(header)
  const checksum = checksumStruct(rawHeader, fuHeader)
  if (checksum !== header.checksum) {
    await childVhd._write(rawHeader, VHD_FOOTER_SIZE)
    return true
export const createReadStream = asyncIteratorToStream(function * (handler, path) {
  const fds = []

  try {
    const vhds = []
    while (true) {
      const fd = yield handler.openFile(path, 'r')
      fds.push(fd)
      const vhd = new Vhd(handler, fd)
      vhds.push(vhd)
      yield vhd.readHeaderAndFooter()
      yield vhd.readBlockTable()

      if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
        break
      }

      path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
    }
    const nVhds = vhds.length

    // this is the VHD we want to synthesize
    const vhd = vhds[0]

    // data of our synthetic VHD
    // TODO: empty parentUuid and parentLocatorEntry-s in header
    let header = {
      ...vhd.header,
      tableOffset: 512 + 1024,
      parentUnicodeName: '',
    }

    const bat = Buffer.allocUnsafe(
      Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
    )
    let footer = {
      ...vhd.footer,
      diskType: HARD_DISK_TYPE_DYNAMIC,
    }
    const sectorsPerBlockData = vhd.sectorsPerBlock
    const sectorsPerBlock =
      sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE

    const nBlocks = Math.ceil(footer.currentSize / header.blockSize)

    const blocksOwner = new Array(nBlocks)
    for (
      let iBlock = 0,
        blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
      iBlock < nBlocks;
      ++iBlock
    ) {
      let blockSector = BLOCK_UNUSED
      for (let i = 0; i < nVhds; ++i) {
        if (vhds[i].containsBlock(iBlock)) {
          blocksOwner[iBlock] = i
          blockSector = blockOffset
          blockOffset += sectorsPerBlock
          break
        }
      }
      bat.writeUInt32BE(blockSector, iBlock * 4)
    }

    footer = fuFooter.pack(footer)
    checksumStruct(footer, fuFooter)
    yield footer

    header = fuHeader.pack(header)
    checksumStruct(header, fuHeader)
    yield header

    yield bat

    const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
    for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
      const owner = blocksOwner[iBlock]
      if (owner === undefined) {
        continue
      }

      yield bitmap

      const blocksByVhd = new Map()
      const emitBlockSectors = function * (iVhd, i, n) {
        const vhd = vhds[iVhd]
        const isRootVhd = vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC
        if (!vhd.containsBlock(iBlock)) {
          if (isRootVhd) {
            yield Buffer.alloc((n - i) * VHD_SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, i, n)
          }
          return
        }
        let block = blocksByVhd.get(vhd)
        if (block === undefined) {
          block = yield vhd._readBlock(iBlock)
          blocksByVhd.set(vhd, block)
        }
        const { bitmap, data } = block
        if (isRootVhd) {
          yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
          return
        }
        while (i < n) {
          const hasData = mapTestBit(bitmap, i)
          const start = i
          do {
            ++i
          } while (i < n && mapTestBit(bitmap, i) === hasData)
          if (hasData) {
            yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
          } else {
            yield * emitBlockSectors(iVhd + 1, start, i)
          }
        }
      }
      yield * emitBlockSectors(owner, 0, sectorsPerBlock)
    }

    yield footer
  } finally {
    for (let i = 0, n = fds.length; i < n; ++i) {
      handler.closeFile(fds[i]).catch(error => {
        console.warn('createReadStream, closeFd', i, error)
      })
    }
  }
})
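// Usage sketch (editor's note, not part of the diff): the stream returned by
// createReadStream emits one complete dynamic VHD synthesized from the whole
// chain, so exporting a flat copy is a single pipe. `flatten.vhd` is a
// hypothetical output name.
async function exportFlatVhd (handler, path) {
  const stream = createReadStream(handler, path)
  const output = await handler.createOutputStream('flatten.vhd')
  await fromEvent(stream.pipe(output), 'finish')
}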

  return false
export async function readVhdMetadata (handler: RemoteHandler, path: string) {
  const vhd = new Vhd(handler, path)
  await vhd.readHeaderAndFooter()
  return {
    footer: vhd.footer,
    header: vhd.header,
  }
}

packages/xo-server/src/vhd-test.js (new file, 72 lines)
@@ -0,0 +1,72 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'

async function testVhdMerge () {
  console.log('before merge')
  const moOfRandom = 4
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom}M < /dev/urandom >randomfile`,
  ])
  await execa('bash', [
    '-c',
    `head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
  ])
  await execa('qemu-img', [
    'convert',
    '-f',
    'raw',
    '-Ovpc',
    'randomfile',
    'randomfile.vhd',
  ])
  await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
  await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
  // await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])

  const handler = new LocalHandler({ url: 'file://' + process.cwd() })
  const originalSize = await handler._getSize('randomfile')
  await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
  const childVhd = new Vhd(handler, 'randomfile.vhd')
  console.log('changing type')
  await childVhd.readHeaderAndFooter()
  console.log('child vhd', childVhd.footer.currentSize, originalSize)
  await childVhd.readBlockTable()
  childVhd.footer.diskType = 4 // Delta backup.
  await childVhd.writeFooter()
  console.log('chained')
  await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
  console.log('merged')
  const parentVhd = new Vhd(handler, 'empty.vhd')
  await parentVhd.readHeaderAndFooter()
  console.log('parent vhd', parentVhd.footer.currentSize)

  await execa('qemu-img', [
    'convert',
    '-f',
    'vpc',
    '-Oraw',
    'empty.vhd',
    'recovered',
  ])
  await execa('truncate', ['-s', originalSize, 'recovered'])
  console.log('ls', (await execa('ls', ['-lt'])).stdout)
  console.log(
    'diff',
    (await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
  )

  /* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
  await vhd.readHeaderAndFooter()
  await vhd.readBlockTable()
  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  await vhd.ensureBatSize(300)

  console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
  */
  console.log(await handler.list())
  console.log('lol')
}

export { testVhdMerge as default }
@@ -232,13 +232,11 @@ const TRANSFORMS = {
      }

      const { major, minor } = guestMetrics.PV_drivers_version
      const [hostMajor, hostMinor] = (
        obj.$resident_on || obj.$pool.$master
      ).software_version.product_version.split('.')

      return major >= hostMajor && minor >= hostMinor
        ? 'up to date'
        : 'out of date'
      return {
        major,
        minor,
      }
    })()

    let resourceSet = otherConfig['xo:resource_set']
@@ -512,9 +510,7 @@ const TRANSFORMS = {
      vdi.type += '-snapshot'
      vdi.snapshot_time = toTimestamp(obj.snapshot_time)
      vdi.$snapshot_of = link(obj, 'snapshot_of')
    }

    if (!obj.managed) {
    } else if (!obj.managed) {
      vdi.type += '-unmanaged'
    }

@@ -1,10 +1,32 @@
import endsWith from 'lodash/endsWith'
import JSON5 from 'json5'
import limitConcurrency from 'limit-concurrency-decorator'
import { BaseError } from 'make-error'
import {
  endsWith,
  findKey,
  forEach,
  get,
  identity,
  map,
  mapValues,
  mean,
  sum,
  uniq,
  zipWith,
} from 'lodash'

import { parseDateTime } from './xapi'

export class FaultyGranularity extends BaseError {}

// -------------------------------------------------------------------

// according to https://xapi-project.github.io/xen-api/metrics.html
// The values are stored at intervals of:
//  - 5 seconds for the past 10 minutes
//  - one minute for the past 2 hours
//  - one hour for the past week
//  - one day for the past year
const RRD_STEP_SECONDS = 5
const RRD_STEP_MINUTES = 60
const RRD_STEP_HOURS = 3600
@@ -17,6 +39,7 @@ const RRD_STEP_FROM_STRING = {
  days: RRD_STEP_DAYS,
}

// points = intervalInSeconds / step
const RRD_POINTS_PER_STEP = {
  [RRD_STEP_SECONDS]: 120,
  [RRD_STEP_MINUTES]: 120,
@@ -24,16 +47,6 @@ const RRD_POINTS_PER_STEP = {
  [RRD_STEP_DAYS]: 366,
}
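// Editor's note (not part of the diff): step * points gives the window each
// granularity covers, matching the comment above.
console.log(RRD_STEP_SECONDS * RRD_POINTS_PER_STEP[RRD_STEP_SECONDS]) // 600 s = 10 minutes
console.log(RRD_STEP_MINUTES * RRD_POINTS_PER_STEP[RRD_STEP_MINUTES]) // 7200 s = 2 hours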

export class XapiStatsError extends BaseError {}

export class UnknownLegendFormat extends XapiStatsError {
  constructor (line) {
    super('Unknown legend line: ' + line)
  }
}

export class FaultyGranularity extends XapiStatsError {}

// -------------------------------------------------------------------
// Utils
// -------------------------------------------------------------------
@@ -47,353 +60,185 @@ function convertNanToNull (value) {
  return isNaN(value) ? null : value
}

async function getServerTimestamp (xapi, host) {
  const serverLocalTime = await xapi.call('host.get_servertime', host.$ref)
  return Math.floor(parseDateTime(serverLocalTime).getTime() / 1000)
async function getServerTimestamp (xapi, hostRef) {
  const serverLocalTime = await xapi.call('host.get_servertime', hostRef)
  return Math.floor(parseDateTime(serverLocalTime).getTime() / 1e3)
}
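// Editor's sketch (not part of the diff): the epoch-seconds conversion done
// by getServerTimestamp, shown with a plain Date instead of a XAPI timestamp.
const exampleMs = new Date('2018-01-01T00:00:00Z').getTime() // 1514764800000
console.log(Math.floor(exampleMs / 1e3)) // 1514764800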

// -------------------------------------------------------------------
// Stats
// -------------------------------------------------------------------

function getNewHostStats () {
  return {
    cpus: [],
    pifs: {
      rx: [],
      tx: [],
    },
    load: [],
    memory: [],
    memoryFree: [],
    memoryUsed: [],
const computeValues = (dataRow, legendIndex, transformValue = identity) =>
  map(dataRow, ({ values }) =>
    transformValue(convertNanToNull(values[legendIndex]))
  )

const combineStats = (stats, path, combineValues) =>
  zipWith(...map(stats, path), (...values) => combineValues(values))

// It browses the object in depth and initialises its properties
// The targetPath can be a string or an array describing the depth
// targetPath: [a, b, c] => a.b.c
const getValuesFromDepth = (obj, targetPath) => {
  if (typeof targetPath === 'string') {
    return (obj[targetPath] = [])
  }
}

function getNewVmStats () {
  return {
    cpus: [],
    vifs: {
      rx: [],
      tx: [],
    },
    xvds: {
      r: {},
      w: {},
    },
    memory: [],
    memoryFree: [],
    memoryUsed: [],
  }
}

// -------------------------------------------------------------------
// Stats legends
// -------------------------------------------------------------------

function getNewHostLegends () {
  return {
    cpus: [],
    pifs: {
      rx: [],
      tx: [],
    },
    load: null,
    memoryFree: null,
    memory: null,
  }
}

function getNewVmLegends () {
  return {
    cpus: [],
    vifs: {
      rx: [],
      tx: [],
    },
    xvds: {
      r: [],
      w: [],
    },
    memoryFree: null,
    memory: null,
  }
}

// Compute one legend line for one host
function parseOneHostLegend (hostLegend, type, index) {
  let resReg

  if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
    hostLegend.cpus[resReg[1]] = index
  } else if ((resReg = /^pif_eth([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
    if (resReg[2] === 'rx') {
      hostLegend.pifs.rx[resReg[1]] = index
    } else {
      hostLegend.pifs.tx[resReg[1]] = index
  forEach(targetPath, (path, key) => {
    if (obj[path] === undefined) {
      obj = obj[path] = targetPath.length - 1 === key ? [] : {}
      return
    }
  } else if (type === 'loadavg') {
    hostLegend.load = index
  } else if (type === 'memory_free_kib') {
    hostLegend.memoryFree = index
  } else if (type === 'memory_total_kib') {
    hostLegend.memory = index
  }
    obj = obj[path]
  })
  return obj
}

// Compute one legend line for one vm
function parseOneVmLegend (vmLegend, type, index) {
  let resReg
const testMetric = (test, type) =>
  typeof test === 'string'
    ? test === type
    : typeof test === 'function' ? test(type) : test.exec(type)

  if ((resReg = /^cpu([0-9]+)$/.exec(type)) !== null) {
    vmLegend.cpus[resReg[1]] = index
  } else if ((resReg = /^vif_([0-9]+)_(rx|tx)$/.exec(type)) !== null) {
    if (resReg[2] === 'rx') {
      vmLegend.vifs.rx[resReg[1]] = index
    } else {
      vmLegend.vifs.tx[resReg[1]] = index
    }
  } else if ((resReg = /^vbd_xvd(.)_(read|write)$/.exec(type))) {
    if (resReg[2] === 'read') {
      vmLegend.xvds.r[resReg[1]] = index
    } else {
      vmLegend.xvds.w[resReg[1]] = index
    }
  } else if (type === 'memory_internal_free') {
    vmLegend.memoryFree = index
  } else if (endsWith(type, 'memory')) {
    vmLegend.memory = index
  }
}
const findMetric = (metrics, metricType) => {
  let testResult
  let metric

// Compute Stats Legends for host and vms from RRD update
function parseLegends (json) {
  const hostLegends = getNewHostLegends()
  const vmsLegends = {}
  forEach(metrics, (current, key) => {
    if (current.test === undefined) {
      const newValues = findMetric(current, metricType)

  json.meta.legend.forEach((value, index) => {
    const parsedLine = /^AVERAGE:(host|vm):(.+):(.+)$/.exec(value)

    if (parsedLine === null) {
      throw new UnknownLegendFormat(value)
    }

    const [, name, uuid, type] = parsedLine

    if (name !== 'vm') {
      parseOneHostLegend(hostLegends, type, index)
    } else {
      if (vmsLegends[uuid] === undefined) {
        vmsLegends[uuid] = getNewVmLegends()
      metric = newValues.metric
      if (metric !== undefined) {
        testResult = newValues.testResult
        return false
      }

      parseOneVmLegend(vmsLegends[uuid], type, index)
    } else if ((testResult = testMetric(current.test, metricType))) {
      metric = current
      return false
    }
  })

  return [hostLegends, vmsLegends]
  return { metric, testResult }
}
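// Illustration (editor's note, not part of the diff): findMetric walks a
// (possibly nested) metrics description and returns the first entry whose
// `test` matches the legend type, plus the regexp match when the test is a
// regexp. With the STATS object defined below:
//   findMetric(STATS.host, 'loadavg') -> { metric: STATS.host.load, testResult: true }
//   findMetric(STATS.host, 'cpu2')    -> { metric: STATS.host.cpus, testResult: ['cpu2', '2', ...] }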

// -------------------------------------------------------------------

// The metrics:
//  test: can be a function, regexp or string, default to: currentKey
//  getPath: default to: () => currentKey
//  transformValue: default to: identity
const STATS = {
  host: {
    load: {
      test: 'loadavg',
    },
    memoryFree: {
      test: 'memory_free_kib',
      transformValue: value => value * 1024,
    },
    memory: {
      test: 'memory_total_kib',
      transformValue: value => value * 1024,
    },
    cpus: {
      test: /^cpu(\d+)$/,
      getPath: matches => ['cpus', matches[1]],
      transformValue: value => value * 1e2,
    },
    pifs: {
      rx: {
        test: /^pif_eth(\d+)_rx$/,
        getPath: matches => ['pifs', 'rx', matches[1]],
      },
      tx: {
        test: /^pif_eth(\d+)_tx$/,
        getPath: matches => ['pifs', 'tx', matches[1]],
      },
    },
    iops: {
      r: {
        test: /^iops_read_(\w+)$/,
        getPath: matches => ['iops', 'r', matches[1]],
      },
      w: {
        test: /^iops_write_(\w+)$/,
        getPath: matches => ['iops', 'w', matches[1]],
      },
    },
    ioThroughput: {
      r: {
        test: /^io_throughput_read_(\w+)$/,
        getPath: matches => ['ioThroughput', 'r', matches[1]],
        transformValue: value => value * 2 ** 20,
      },
      w: {
        test: /^io_throughput_write_(\w+)$/,
        getPath: matches => ['ioThroughput', 'w', matches[1]],
        transformValue: value => value * 2 ** 20,
      },
    },
    latency: {
      r: {
        test: /^read_latency_(\w+)$/,
        getPath: matches => ['latency', 'r', matches[1]],
        transformValue: value => value / 1e3,
      },
      w: {
        test: /^write_latency_(\w+)$/,
        getPath: matches => ['latency', 'w', matches[1]],
        transformValue: value => value / 1e3,
      },
    },
    iowait: {
      test: /^iowait_(\w+)$/,
      getPath: matches => ['iowait', matches[1]],
      transformValue: value => value * 1e2,
    },
  },
  vm: {
    memoryFree: {
      test: 'memory_internal_free',
      transformValue: value => value * 1024,
    },
    memory: {
      test: metricType => endsWith(metricType, 'memory'),
    },
    cpus: {
      test: /^cpu(\d+)$/,
      getPath: matches => ['cpus', matches[1]],
      transformValue: value => value * 1e2,
    },
    vifs: {
      rx: {
        test: /^vif_(\d+)_rx$/,
        getPath: matches => ['vifs', 'rx', matches[1]],
      },
      tx: {
        test: /^vif_(\d+)_tx$/,
        getPath: matches => ['vifs', 'tx', matches[1]],
      },
    },
    xvds: {
      r: {
        test: /^vbd_xvd(.)_read$/,
        getPath: matches => ['xvds', 'r', matches[1]],
      },
      w: {
        test: /^vbd_xvd(.)_write$/,
        getPath: matches => ['xvds', 'w', matches[1]],
      },
    },
  },
}
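// Illustration (editor's sketch, not part of the diff): computeValues maps
// one legend column out of the RRD rows and applies a metric's
// transformValue, e.g. KiB -> bytes for 'memory_total_kib'. The sample rows
// are hypothetical.
const exampleRows = [{ values: [1024, NaN] }, { values: [2048, 7] }]
console.log(computeValues(exampleRows, 0, value => value * 1024)) // [1048576, 2097152]
console.log(computeValues(exampleRows, 1)) // [null, 7] (NaN becomes null)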
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
export default class XapiStats {
|
||||
constructor () {
|
||||
this._vms = {}
|
||||
this._hosts = {}
|
||||
this._statsByObject = {}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
// Remove stats (Helper)
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
_removeOlderStats (source, dest, pointsPerStep) {
|
||||
for (const key in source) {
|
||||
if (key === 'cpus') {
|
||||
for (const cpuIndex in source.cpus) {
|
||||
dest.cpus[cpuIndex].splice(
|
||||
0,
|
||||
dest.cpus[cpuIndex].length - pointsPerStep
|
||||
)
|
||||
}
|
||||
|
||||
// If the number of cpus has been decreased, remove !
|
||||
let offset
|
||||
|
||||
if ((offset = dest.cpus.length - source.cpus.length) > 0) {
|
||||
dest.cpus.splice(-offset)
|
||||
}
|
||||
} else if (endsWith(key, 'ifs')) {
|
||||
// For each pif or vif
|
||||
for (const ifType in source[key]) {
|
||||
for (const pifIndex in source[key][ifType]) {
|
||||
dest[key][ifType][pifIndex].splice(
|
||||
0,
|
||||
dest[key][ifType][pifIndex].length - pointsPerStep
|
||||
)
|
||||
}
|
||||
|
||||
// If the number of pifs has been decreased, remove !
|
||||
let offset
|
||||
|
||||
if (
|
||||
(offset = dest[key][ifType].length - source[key][ifType].length) > 0
|
||||
) {
|
||||
dest[key][ifType].splice(-offset)
|
||||
}
|
||||
}
|
||||
} else if (key === 'xvds') {
|
||||
for (const xvdType in source.xvds) {
|
||||
for (const xvdLetter in source.xvds[xvdType]) {
|
||||
dest.xvds[xvdType][xvdLetter].splice(
|
||||
0,
|
||||
dest.xvds[xvdType][xvdLetter].length - pointsPerStep
|
||||
)
|
||||
}
|
||||
|
||||
// If the number of xvds has been decreased, remove !
|
||||
// FIXME
|
||||
}
|
||||
} else if (key === 'load') {
|
||||
dest.load.splice(0, dest[key].length - pointsPerStep)
|
||||
} else if (key === 'memory') {
|
||||
// Load, memory, memoryFree, memoryUsed
|
||||
const length = dest.memory.length - pointsPerStep
|
||||
dest.memory.splice(0, length)
|
||||
dest.memoryFree.splice(0, length)
|
||||
dest.memoryUsed.splice(0, length)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  // -------------------------------------------------------------------
  // HOST: Computation and stats update
  // -------------------------------------------------------------------

  // Compute one stats row for one host
  _parseRowHostStats (hostLegends, hostStats, values) {
    // Cpus
    hostLegends.cpus.forEach((cpuIndex, index) => {
      if (hostStats.cpus[index] === undefined) {
        hostStats.cpus[index] = []
      }

      hostStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Pifs
    for (const pifType in hostLegends.pifs) {
      hostLegends.pifs[pifType].forEach((pifIndex, index) => {
        if (hostStats.pifs[pifType][index] === undefined) {
          hostStats.pifs[pifType][index] = []
        }

        hostStats.pifs[pifType][index].push(convertNanToNull(values[pifIndex]))
      })
    }

    // Load
    hostStats.load.push(convertNanToNull(values[hostLegends.load]))

    // Memory.
    // WARNING! memory/memoryFree are in kB.
    const memory = values[hostLegends.memory] * 1024
    const memoryFree = values[hostLegends.memoryFree] * 1024

    hostStats.memory.push(memory)

    if (hostLegends.memoryFree !== undefined) {
      hostStats.memoryFree.push(memoryFree)
      hostStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Compute stats for host from RRD update
  _parseHostStats (json, hostname, hostLegends, step) {
    const host = this._hosts[hostname][step]

    if (host.stats === undefined) {
      host.stats = getNewHostStats()
    }

    for (const row of json.data) {
      this._parseRowHostStats(hostLegends, host.stats, row.values)
    }
  }
  // -------------------------------------------------------------------
  // VM: Computation and stats update
  // -------------------------------------------------------------------

  // Compute stats for vms from RRD update
  _parseRowVmStats (vmLegends, vmStats, values) {
    // Cpus
    vmLegends.cpus.forEach((cpuIndex, index) => {
      if (vmStats.cpus[index] === undefined) {
        vmStats.cpus[index] = []
      }

      vmStats.cpus[index].push(values[cpuIndex] * 100)
    })

    // Vifs
    for (const vifType in vmLegends.vifs) {
      vmLegends.vifs[vifType].forEach((vifIndex, index) => {
        if (vmStats.vifs[vifType][index] === undefined) {
          vmStats.vifs[vifType][index] = []
        }

        vmStats.vifs[vifType][index].push(convertNanToNull(values[vifIndex]))
      })
    }

    // Xvds
    for (const xvdType in vmLegends.xvds) {
      for (const index in vmLegends.xvds[xvdType]) {
        if (vmStats.xvds[xvdType][index] === undefined) {
          vmStats.xvds[xvdType][index] = []
        }

        vmStats.xvds[xvdType][index].push(
          convertNanToNull(values[vmLegends.xvds[xvdType][index]])
        )
      }
    }

    // Memory
    // WARNING! memoryFree is in kB, memory is in bytes.
    const memory = values[vmLegends.memory]
    const memoryFree = values[vmLegends.memoryFree] * 1024

    vmStats.memory.push(memory)

    if (vmLegends.memoryFree !== undefined) {
      vmStats.memoryFree.push(memoryFree)
      vmStats.memoryUsed.push(memory - memoryFree)
    }
  }

  // Compute stats for vms
  _parseVmsStats (json, hostname, vmsLegends, step) {
    if (this._vms[hostname][step] === undefined) {
      this._vms[hostname][step] = {}
    }

    const vms = this._vms[hostname][step]

    for (const uuid in vmsLegends) {
      if (vms[uuid] === undefined) {
        vms[uuid] = getNewVmStats()
      }
    }

    for (const row of json.data) {
      for (const uuid in vmsLegends) {
        this._parseRowVmStats(vmsLegends[uuid], vms[uuid], row.values)
      }
    }
  }
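  // Editorial sketch: convertNanToNull, used throughout the parsers above,
  // turns RRD NaN samples into nulls that consumers can skip. Assumed shape
  // (the real helper is defined earlier in this file):
  //
  //   const convertNanToNull = value => (Number.isNaN(value) ? null : value)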
  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Execute one HTTP request on a XenServer to get stats
  // Returns stats (JSON format) or throws a got exception
  @limitConcurrency(3)
@@ -411,40 +256,46 @@ export default class XapiStats {
      .then(response => response.readAll().then(JSON5.parse))
  }
  async _getLastTimestamp (xapi, host, step) {
    if (this._hosts[host.address][step] === undefined) {
      const serverTimeStamp = await getServerTimestamp(xapi, host)
      return serverTimeStamp - step * RRD_POINTS_PER_STEP[step] + step
    }
  async _getNextTimestamp (xapi, host, step) {
    const currentTimeStamp = await getServerTimestamp(xapi, host.$ref)
    const maxDuration = step * RRD_POINTS_PER_STEP[step]
    const lastTimestamp = get(this._statsByObject, [
      host.uuid,
      step,
      'endTimestamp',
    ])

    return this._hosts[host.address][step].endTimestamp
    if (
      lastTimestamp === undefined ||
      currentTimeStamp - lastTimestamp + step > maxDuration
    ) {
      return currentTimeStamp - maxDuration + step
    }
    return lastTimestamp
  }
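  // Editorial worked example of the window arithmetic above, with assumed
  // constants (the real RRD_POINTS_PER_STEP values may differ):
  //
  //   const step = 60                          // one point per minute
  //   const pointsPerStep = 120                // assumed RRD_POINTS_PER_STEP[60]
  //   const maxDuration = step * pointsPerStep // at most 7200 s of history
  //   // On a first fetch (lastTimestamp === undefined), the requested start
  //   // is currentTimeStamp - maxDuration + step, i.e. exactly 120 points.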
  _getPoints (hostname, step, vmId) {
    const hostStats = this._hosts[hostname][step]
  _getStats (hostUuid, step, vmUuid) {
    const hostStats = this._statsByObject[hostUuid][step]

    // Return host points
    if (vmId === undefined) {
    // Return host stats
    if (vmUuid === undefined) {
      return {
        interval: step,
        ...hostStats,
      }
    }

    const vmsStats = this._vms[hostname][step]

    // Return vm points
    // Return vm stats
    return {
      interval: step,
      endTimestamp: hostStats.endTimestamp,
      stats: (vmsStats && vmsStats[vmId]) || getNewVmStats(),
      ...this._statsByObject[vmUuid][step],
    }
  }
  async _getAndUpdatePoints (xapi, host, vmId, granularity) {
    // Get granularity to use
  async _getAndUpdateStats (xapi, { host, vmUuid, granularity }) {
    const step =
      granularity === undefined || granularity === 0
      granularity === undefined
        ? RRD_STEP_SECONDS
        : RRD_STEP_FROM_STRING[granularity]

@@ -455,59 +306,21 @@ export default class XapiStats {
    }
    // Limit the number of HTTP requests
    const hostname = host.address

    if (this._hosts[hostname] === undefined) {
      this._hosts[hostname] = {}
      this._vms[hostname] = {}
    }
    const hostUuid = host.uuid

    if (
      this._hosts[hostname][step] !== undefined &&
      this._hosts[hostname][step].localTimestamp + step > getCurrentTimestamp()
      get(this._statsByObject, [hostUuid, step, 'localTimestamp']) + step >
      getCurrentTimestamp()
    ) {
      return this._getPoints(hostname, step, vmId)
      return this._getStats(hostUuid, step, vmUuid)
    }

    // Check that we are in the right interval; use
    // this._hosts[hostname][step].localTimestamp to avoid bad requests
    // TODO

    // Get json
    const timestamp = await this._getLastTimestamp(xapi, host, step)
    let json = await this._getJson(xapi, host, timestamp)

    // Check whether the granularity matches 'step';
    // if not, retry another URL with the JSON timestamp
    const timestamp = await this._getNextTimestamp(xapi, host, step)
    const json = await this._getJson(xapi, host, timestamp)
    if (json.meta.step !== step) {
      console.log(
        `RRD call: Expected step: ${step}, received step: ${
          json.meta.step
        }. Retry with other timestamp`
      throw new FaultyGranularity(
        `Unable to get the true granularity: ${json.meta.step}`
      )
      const serverTimestamp = await getServerTimestamp(xapi, host)

      // Approximately half the points are requested
      // FIXME: Not the best solution
      json = await this._getJson(
        xapi,
        host,
        serverTimestamp - step * (RRD_POINTS_PER_STEP[step] / 2) + step
      )

      if (json.meta.step !== step) {
        throw new FaultyGranularity(
          `Unable to get the true granularity: ${json.meta.step}`
        )
      }
    }
    // Make new backup slot if necessary
    if (this._hosts[hostname][step] === undefined) {
      this._hosts[hostname][step] = {
        endTimestamp: 0,
        localTimestamp: 0,
      }
    }

    // There is existing data
@@ -516,70 +329,133 @@ export default class XapiStats {
    // timestamp of the oldest data value
    // so we use the timestamp of the oldest data value!
    const startTimestamp = json.data[json.meta.rows - 1].t
    const endTimestamp = get(this._statsByObject, [
      hostUuid,
      step,
      'endTimestamp',
    ])

    // Remove useless data and reorder
    // Note: older values are at the end of json.data.row
    const parseOffset =
      (this._hosts[hostname][step].endTimestamp - startTimestamp + step) /
      step

    json.data.splice(json.data.length - parseOffset)
    json.data.reverse()
    const statsOffset = endTimestamp - startTimestamp + step
    if (endTimestamp !== undefined && statsOffset > 0) {
      const parseOffset = statsOffset / step
      // Remove useless data
      // Note: older values are at the end of json.data.row
      json.data.splice(json.data.length - parseOffset)
    }

    // There is useful data
    if (json.data.length > 0) {
      const [hostLegends, vmsLegends] = parseLegends(json)

      // Compute and update host/vms stats
      this._parseVmsStats(json, hostname, vmsLegends, step)
      this._parseHostStats(json, hostname, hostLegends, step)

      // Remove older stats
      this._removeOlderStats(
        hostLegends,
        this._hosts[hostname][step].stats,
        RRD_POINTS_PER_STEP[step]
      )

      for (const uuid in vmsLegends) {
        this._removeOlderStats(
          vmsLegends[uuid],
          this._vms[hostname][step][uuid],
          RRD_POINTS_PER_STEP[step]
      // reorder data
      json.data.reverse()
      forEach(json.meta.legend, (legend, index) => {
        const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
          legend
        )
      }

        const metrics = STATS[type]
        if (metrics === undefined) {
          return
        }

        const { metric, testResult } = findMetric(metrics, metricType)

        if (metric === undefined) {
          return
        }

        const path =
          metric.getPath !== undefined
            ? metric.getPath(testResult)
            : [findKey(metrics, metric)]

        const metricValues = getValuesFromDepth(this._statsByObject, [
          uuid,
          step,
          'stats',
          ...path,
        ])

        metricValues.push(
          ...computeValues(json.data, index, metric.transformValue)
        )

        // remove older values
        metricValues.splice(
          0,
          metricValues.length - RRD_POINTS_PER_STEP[step]
        )
      })
    }
  }

    // Update timestamp
    this._hosts[hostname][step].endTimestamp = json.meta.end
    this._hosts[hostname][step].localTimestamp = getCurrentTimestamp()

    return this._getPoints(hostname, step, vmId)
    const hostStats = this._statsByObject[hostUuid][step]
    hostStats.endTimestamp = json.meta.end
    hostStats.localTimestamp = getCurrentTimestamp()
    return this._getStats(hostUuid, step, vmUuid)
  }
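  // Editorial illustration of the legend parsing above (the UUID is a
  // placeholder, not a real object):
  //
  //   const [, type, uuid, metricType] = /^AVERAGE:([^:]+):(.+):(.+)$/.exec(
  //     'AVERAGE:vm:00000000-0000-0000-0000-000000000000:cpu0'
  //   )
  //   // type === 'vm', metricType === 'cpu0'
  //   // 'cpu0' matches STATS.vm.cpus.test, getPath gives ['cpus', '0'], so
  //   // the computed values land in _statsByObject[uuid][step].stats.cpus[0].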
  // -------------------------------------------------------------------
  // -------------------------------------------------------------------

  // Warning: this function returns a reference to internal data,
  // so the data may be changed by a parallel call to this function.
  // The returned data must not be modified.

  // Return host stats
  async getHostPoints (xapi, hostId, granularity) {
    const host = xapi.getObject(hostId)
    return this._getAndUpdatePoints(xapi, host, undefined, granularity)
  getHostStats (xapi, hostId, granularity) {
    return this._getAndUpdateStats(xapi, {
      host: xapi.getObject(hostId),
      granularity,
    })
  }

  // Return vms stats
  async getVmPoints (xapi, vmId, granularity) {
  getVmStats (xapi, vmId, granularity) {
    const vm = xapi.getObject(vmId)
    const host = vm.$resident_on

    if (!host) {
      throw new Error(`VM ${vmId} is halted or host could not be found.`)
    }

    return this._getAndUpdatePoints(xapi, host, vm.uuid, granularity)
    return this._getAndUpdateStats(xapi, {
      host,
      vmUuid: vm.uuid,
      granularity,
    })
  }
  async getSrStats (xapi, srId, granularity) {
    const sr = xapi.getObject(srId)

    const hostsStats = {}
    await Promise.all(
      map(uniq(map(sr.$PBDs, 'host')), hostId =>
        this.getHostStats(xapi, hostId, granularity).then(stats => {
          hostsStats[xapi.getObject(hostId).name_label] = stats
        })
      )
    )

    const srShortUUID = sr.uuid.slice(0, 8)
    return {
      interval: hostsStats[Object.keys(hostsStats)[0]].interval,
      endTimestamp: Math.max(...map(hostsStats, 'endTimestamp')),
      localTimestamp: Math.min(...map(hostsStats, 'localTimestamp')),
      stats: {
        iops: {
          r: combineStats(hostsStats, `stats.iops.r[${srShortUUID}]`, sum),
          w: combineStats(hostsStats, `stats.iops.w[${srShortUUID}]`, sum),
        },
        ioThroughput: {
          r: combineStats(
            hostsStats,
            `stats.ioThroughput.r[${srShortUUID}]`,
            sum
          ),
          w: combineStats(
            hostsStats,
            `stats.ioThroughput.w[${srShortUUID}]`,
            sum
          ),
        },
        latency: {
          r: combineStats(hostsStats, `stats.latency.r[${srShortUUID}]`, mean),
          w: combineStats(hostsStats, `stats.latency.w[${srShortUUID}]`, mean),
        },
        iowait: mapValues(hostsStats, `stats.iowait[${srShortUUID}]`),
      },
    }
  }
}
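A sketch of a combineStats helper consistent with its use above — an assumption for illustration; the real helper (and sum/mean) live elsewhere in xo-server and may differ:

import { map, zipWith } from 'lodash'

// Merge the series found at `path` in each host's stats, point by point,
// with `combine` (sum for IOPS/throughput, mean for latency).
const combineStats = (hostsStats, path, combine) =>
  zipWith(...map(hostsStats, path), (...values) => combine(values))

const sum = values => values.reduce((a, b) => a + b, 0)
const mean = values => sum(values) / values.length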
@@ -6,9 +6,10 @@ import synchronized from 'decorator-synchronized'
import tarStream from 'tar-stream'
import vmdkToVhd from 'xo-vmdk-to-vhd'
import {
  cancellable,
  cancelable,
  catchPlus as pCatch,
  defer,
  fromEvent,
  ignoreErrors,
} from 'promise-toolbox'
import { PassThrough } from 'stream'
@@ -34,7 +35,6 @@ import { mixin } from '../decorators'
import {
  asyncMap,
  camelToSnakeCase,
  createRawObject,
  ensureArray,
  forEach,
  isFunction,
@@ -50,6 +50,7 @@ import {

import mixins from './mixins'
import OTHER_CONFIG_TEMPLATE from './other-config-template'
import { type DeltaVmExport } from './'
import {
  asBoolean,
  asInteger,
@@ -84,9 +85,6 @@ export const VDI_FORMAT_RAW = 'raw'
export const IPV4_CONFIG_MODES = ['None', 'DHCP', 'Static']
export const IPV6_CONFIG_MODES = ['None', 'DHCP', 'Static', 'Autoconf']

// do not share the same limit for export and import, it could lead to deadlocks
const importLimit = concurrency(2)

// ===================================================================

@mixin(mapToArray(mixins))
@@ -103,8 +101,8 @@ export default class Xapi extends XapiBase {
      return getObject.apply(this, args)
    })(this.getObject)

    const genericWatchers = (this._genericWatchers = createRawObject())
    const objectsWatchers = (this._objectWatchers = createRawObject())
    const genericWatchers = (this._genericWatchers = { __proto__: null })
    const objectsWatchers = (this._objectWatchers = { __proto__: null })

    const onAddOrUpdate = objects => {
      forEach(objects, object => {
@@ -268,8 +266,8 @@ export default class Xapi extends XapiBase {
        return value === null
          ? removal
          : removal
              ::ignoreErrors()
              .then(() => this.call(add, ref, name, prepareXapiParam(value)))
              ::ignoreErrors()
              .then(() => this.call(add, ref, name, prepareXapiParam(value)))
      }
    })
  )
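Throughout this changeset, `createRawObject()` from the utils is replaced by object literals with a null prototype. Both produce dictionaries that are safe to index with arbitrary keys; a minimal standalone demonstration:

const dict = { __proto__: null }
dict.toString = 1 // plain data, nothing inherited from Object.prototype
console.log('constructor' in dict) // false, unlike in a plain {}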
@@ -519,9 +517,9 @@ export default class Xapi extends XapiBase {
    const onVmCreation =
      nameLabel !== undefined
        ? vm =>
          targetXapi._setObjectProperties(vm, {
            nameLabel,
          })
            targetXapi._setObjectProperties(vm, {
              nameLabel,
            })
        : null

    const vm = await targetXapi._getOrWaitObject(
@@ -635,7 +633,12 @@ export default class Xapi extends XapiBase {
    )
  }

  async _deleteVm (vm, deleteDisks = true, force = false) {
  async _deleteVm (
    vm,
    deleteDisks = true,
    force = false,
    forceDeleteDefaultTemplate = false
  ) {
    debug(`Deleting VM ${vm.name_label}`)

    const { $ref } = vm
@@ -656,6 +659,10 @@ export default class Xapi extends XapiBase {
    vm = await this.barrier('VM', $ref)

    return Promise.all([
      forceDeleteDefaultTemplate &&
        this._updateObjectMapProperty(vm, 'other_config', {
          default_template: null,
        }),
      this.call('VM.destroy', $ref),

      asyncMap(vm.$snapshots, snapshot =>
@@ -695,8 +702,13 @@ export default class Xapi extends XapiBase {
    ])
  }

  async deleteVm (vmId, deleteDisks, force) {
    return /* await */ this._deleteVm(this.getObject(vmId), deleteDisks, force)
  async deleteVm (vmId, deleteDisks, force, forceDeleteDefaultTemplate) {
    return /* await */ this._deleteVm(
      this.getObject(vmId),
      deleteDisks,
      force,
      forceDeleteDefaultTemplate
    )
  }

  getVmConsole (vmId) {
@@ -711,17 +723,23 @@ export default class Xapi extends XapiBase {
  }

  // Returns a stream to the exported VM.
  async exportVm (vmId, { compress = true } = {}) {
  @concurrency(2, stream => stream.then(stream => fromEvent(stream, 'end')))
  @cancelable
  async exportVm ($cancelToken, vmId, { compress = true } = {}) {
    const vm = this.getObject(vmId)

    let host
    let snapshotRef
    if (isVmRunning(vm)) {
      host = vm.$resident_on
      snapshotRef = (await this._snapshotVm(vm)).$ref
      snapshotRef = (await this._snapshotVm(
        $cancelToken,
        vm,
        `[XO Export] ${vm.name_label}`
      )).$ref
    }

    const promise = this.getResource('/export/', {
    const promise = this.getResource($cancelToken, '/export/', {
      host,
      query: {
        ref: snapshotRef || vm.$ref,
@@ -772,20 +790,21 @@ export default class Xapi extends XapiBase {
  }

  _assertHealthyVdiChains (vm) {
    const cache = createRawObject()
    const cache = { __proto__: null }
    forEach(vm.$VBDs, ({ $VDI }) => {
      this._assertHealthyVdiChain($VDI, cache)
    })
  }
  // Create a snapshot of the VM and returns a delta export object.
  @cancellable
  // Create a snapshot (if necessary) of the VM and returns a delta export
  // object.
  @cancelable
  @deferrable
  async exportDeltaVm (
    $defer,
    $cancelToken,
    vmId,
    baseVmId = undefined,
    vmId: string,
    baseVmId?: string,
    {
      bypassVdiChainsCheck = false,

@@ -795,17 +814,16 @@ export default class Xapi extends XapiBase {
      disableBaseTags = false,
      snapshotNameLabel = undefined,
    } = {}
  ) {
  ): Promise<DeltaVmExport> {
    let vm = this.getObject(vmId)
    if (!bypassVdiChainsCheck) {
      this._assertHealthyVdiChains(this.getObject(vmId))
      this._assertHealthyVdiChains(vm)
    }

    const vm = await this.snapshotVm(vmId)
    $defer.onFailure(() => this._deleteVm(vm))
    if (snapshotNameLabel) {
      ;this._setObjectProperties(vm, {
        nameLabel: snapshotNameLabel,
      })::ignoreErrors()
    // do not use the snapshot name in the delta export
    const exportedNameLabel = vm.name_label
    if (!vm.is_a_snapshot) {
      vm = await this._snapshotVm($cancelToken, vm, snapshotNameLabel)
      $defer.onFailure(() => this._deleteVm(vm))
    }

    const baseVm = baseVmId && this.getObject(baseVmId)
@@ -856,34 +874,30 @@ export default class Xapi extends XapiBase {
      // Look for a snapshot of this vdi in the base VM.
      const baseVdi = baseVdis[vdi.snapshot_of]

      vdis[vdiRef] =
        baseVdi && !disableBaseTags
          ? {
              ...vdi,
              other_config: {
                ...vdi.other_config,
                [TAG_BASE_DELTA]: baseVdi.uuid,
              },
              $SR$uuid: vdi.$SR.uuid,
            }
          : {
              ...vdi,
              $SR$uuid: vdi.$SR.uuid,
            }
      const stream = (streams[`${vdiRef}.vhd`] = this._exportVdi(
        $cancelToken,
        vdi,
        baseVdi,
        VDI_FORMAT_VHD
      ))
      $defer.onFailure(stream.cancel)
      vdis[vdiRef] = {
        ...vdi,
        other_config: {
          ...vdi.other_config,
          [TAG_BASE_DELTA]:
            baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
        },
        $SR$uuid: vdi.$SR.uuid,
      }

      streams[`${vdiRef}.vhd`] = () =>
        this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
    })

    const vifs = {}
    forEach(vm.$VIFs, vif => {
      const network = vif.$network
      vifs[vif.$ref] = {
        ...vif,
        $network$uuid: vif.$network.uuid,
        $network$uuid: network.uuid,
        $network$name_label: network.name_label,
        // https://github.com/babel/babel-eslint/issues/595
        // eslint-disable-next-line no-undef
        $network$VLAN: network.$PIFs[0]?.VLAN,
      }
    })

@@ -895,18 +909,21 @@ export default class Xapi extends XapiBase {
        vifs,
        vm: {
          ...vm,
          name_label: exportedNameLabel,
          other_config:
            baseVm && !disableBaseTags
              ? {
                  ...vm.other_config,
                  [TAG_BASE_DELTA]: baseVm.uuid,
                }
                  ...vm.other_config,
                  [TAG_BASE_DELTA]: baseVm.uuid,
                }
              : omit(vm.other_config, TAG_BASE_DELTA),
        },
      },
      'streams',
      {
        value: await streams::pAll(),
        configurable: true,
        value: streams,
        writable: true,
      }
    )
  }
@@ -914,9 +931,10 @@ export default class Xapi extends XapiBase {
  @deferrable
  async importDeltaVm (
    $defer,
    delta,
    delta: DeltaVmExport,
    {
      deleteBase = false,
      detectBase = true,
      disableStartAfterImport = true,
      mapVdisSrs = {},
      name_label = delta.vm.name_label,
@@ -929,17 +947,19 @@ export default class Xapi extends XapiBase {
      throw new Error(`Unsupported delta backup version: ${version}`)
    }

    const remoteBaseVmUuid = delta.vm.other_config[TAG_BASE_DELTA]
    let baseVm
    if (remoteBaseVmUuid) {
      baseVm = find(
        this.objects.all,
        obj =>
          (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid
      )
    if (detectBase) {
      const remoteBaseVmUuid = delta.vm.other_config[TAG_BASE_DELTA]
      if (remoteBaseVmUuid) {
        baseVm = find(
          this.objects.all,
          obj =>
            (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid
        )

      if (!baseVm) {
        throw new Error('could not find the base VM')
        if (!baseVm) {
          throw new Error('could not find the base VM')
        }
      }
    }

@@ -949,8 +969,6 @@ export default class Xapi extends XapiBase {
      baseVdis[vbd.VDI] = vbd.$VDI
    })

    const { streams } = delta

    // 1. Create the VMs.
    const vm = await this._getOrWaitObject(
      await this._createVmRecord({
@@ -978,7 +996,7 @@ export default class Xapi extends XapiBase {

    // 3. Create VDIs.
    const newVdis = await map(delta.vdis, async vdi => {
      const remoteBaseVdiUuid = vdi.other_config[TAG_BASE_DELTA]
      const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
      if (!remoteBaseVdiUuid) {
        const newVdi = await this.createVdi({
          ...vdi,
@@ -1012,12 +1030,26 @@ export default class Xapi extends XapiBase {
      return newVdi
    })::pAll()

    const networksOnPoolMasterByDevice = {}
    const networksByNameLabelByVlan = {}
    let defaultNetwork
    forEach(this.pool.$master.$PIFs, pif => {
      defaultNetwork = networksOnPoolMasterByDevice[pif.device] = pif.$network
    forEach(this.objects.all, object => {
      if (object.$type === 'network') {
        const pif = object.$PIFs[0]
        if (pif === undefined) {
          // ignore network
          return
        }
        const vlan = pif.VLAN
        const networksByNameLabel =
          networksByNameLabelByVlan[vlan] ||
          (networksByNameLabelByVlan[vlan] = {})
        defaultNetwork = networksByNameLabel[object.name_label] = object
      }
    })

    const { streams } = delta
    let transferSize = 0

    await Promise.all([
      // Create VBDs.
      asyncMap(delta.vbds, vbd =>
@@ -1030,8 +1062,17 @@ export default class Xapi extends XapiBase {

      // Import VDI contents.
      asyncMap(newVdis, async (vdi, id) => {
        for (const stream of ensureArray(streams[`${id}.vhd`])) {
          await this._importVdiContent(vdi, stream, VDI_FORMAT_VHD)
        for (let stream of ensureArray(streams[`${id}.vhd`])) {
          if (typeof stream === 'function') {
            stream = await stream()
          }
          const sizeStream = stream
            .pipe(createSizeStream())
            .once('finish', () => {
              transferSize += sizeStream.size
            })
          stream.task = sizeStream.task
          await this._importVdiContent(vdi, sizeStream, VDI_FORMAT_VHD)
        }
      }),

@@ -1040,10 +1081,21 @@ export default class Xapi extends XapiBase {

      // Create VIFs.
      asyncMap(delta.vifs, vif => {
        const network =
          (vif.$network$uuid && this.getObject(vif.$network$uuid, null)) ||
          networksOnPoolMasterByDevice[vif.device] ||
          defaultNetwork
        let network =
          vif.$network$uuid && this.getObject(vif.$network$uuid, undefined)

        if (network === undefined) {
          const { $network$VLAN: vlan = -1 } = vif
          const networksByNameLabel = networksByNameLabelByVlan[vlan]
          if (networksByNameLabel !== undefined) {
            network = networksByNameLabel[vif.$network$name_label]
            if (network === undefined) {
              network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
            }
          } else {
            network = defaultNetwork
          }
        }

        if (network) {
          return this._createVif(vm, network, vif)
@@ -1067,7 +1119,7 @@ export default class Xapi extends XapiBase {
      }),
    ])

    return vm
    return { transferSize, vm }
  }
  async _migrateVmWithStorageMotion (
@@ -1227,8 +1279,8 @@ export default class Xapi extends XapiBase {
    )
  }

  @importLimit
  async _importVm (stream, sr, onVmCreation = undefined) {
  @cancelable
  async _importVm ($cancelToken, stream, sr, onVmCreation = undefined) {
    const taskRef = await this.createTask('VM import')
    const query = {}

@@ -1238,16 +1290,18 @@ export default class Xapi extends XapiBase {
      query.sr_id = sr.$ref
    }

    if (onVmCreation) {
    if (onVmCreation != null) {
      ;this._waitObject(
        obj =>
          obj && obj.current_operations && taskRef in obj.current_operations
          obj != null &&
          obj.current_operations != null &&
          taskRef in obj.current_operations
      )
        .then(onVmCreation)
        ::ignoreErrors()
    }

    const vmRef = await this.putResource(stream, '/import/', {
    const vmRef = await this.putResource($cancelToken, stream, '/import/', {
      host,
      query,
      task: taskRef,
@@ -1256,7 +1310,6 @@ export default class Xapi extends XapiBase {
    return vmRef
  }

  @importLimit
  @deferrable
  async _importOvaVm (
    $defer,
@@ -1408,8 +1461,9 @@ export default class Xapi extends XapiBase {
    }
  }

  @synchronized() // like @concurrency(1) but more efficient
  async _snapshotVm (vm, nameLabel = vm.name_label) {
  @concurrency(2)
  @cancelable
  async _snapshotVm ($cancelToken, vm, nameLabel = vm.name_label) {
    debug(
      `Snapshotting VM ${vm.name_label}${
        nameLabel !== vm.name_label ? ` as ${nameLabel}` : ''
@@ -1418,10 +1472,13 @@ export default class Xapi extends XapiBase {

    let ref
    try {
      ref = await this.call('VM.snapshot_with_quiesce', vm.$ref, nameLabel)
      ref = await this.callAsync(
        $cancelToken,
        'VM.snapshot_with_quiesce',
        vm.$ref,
        nameLabel
      ).then(extractOpaqueRef)
      this.addTag(ref, 'quiesce')::ignoreErrors()

      await this._waitObjectState(ref, vm => includes(vm.tags, 'quiesce'))
    } catch (error) {
      const { code } = error
      if (
@@ -1434,13 +1491,18 @@ export default class Xapi extends XapiBase {
      ) {
        throw error
      }
      ref = await this.call('VM.snapshot', vm.$ref, nameLabel)
      ref = await this.callAsync(
        $cancelToken,
        'VM.snapshot',
        vm.$ref,
        nameLabel
      ).then(extractOpaqueRef)
    }
    // Convert the template to a VM and wait to receive the up-to-date object.
    const [, snapshot] = await Promise.all([
      this.call('VM.set_is_a_template', ref, false),
      this._waitObjectState(ref, snapshot => !snapshot.is_a_template),
      this.barrier(ref),
    ])

    return snapshot
@@ -1850,7 +1912,8 @@ export default class Xapi extends XapiBase {
    return snap
  }

  @cancellable
  @concurrency(12, stream => stream.then(stream => fromEvent(stream, 'end')))
  @cancelable
  _exportVdi ($cancelToken, vdi, base, format = VDI_FORMAT_VHD) {
    const host = vdi.$SR.$PBDs[0].$host

@@ -1875,15 +1938,6 @@ export default class Xapi extends XapiBase {
    })
  }

  // Returns a stream to the exported VDI.
  exportVdi (vdiId, { baseId, format } = {}) {
    return this._exportVdi(
      this.getObject(vdiId),
      baseId && this.getObject(baseId),
      format
    )
  }

  // -----------------------------------------------------------------

  async _importVdiContent (vdi, body, format = VDI_FORMAT_VHD) {
@@ -1980,7 +2034,9 @@ export default class Xapi extends XapiBase {
      name_label: name,
      name_description: description,
      MTU: asInteger(mtu),
      other_config: {},
      // Set automatic to false so XenCenter does not get confused
      // https://citrix.github.io/xenserver-sdk/#network
      other_config: { automatic: 'false' },
    })
    $defer.onFailure(() => this.call('network.destroy', networkRef))
    if (pifId) {
85
packages/xo-server/src/xapi/index.js.flow
Normal file
@@ -0,0 +1,85 @@
// @flow

import { type Readable } from 'stream'

type AugmentedReadable = Readable & {
  size?: number,
  task?: Promise<mixed>
}

type MaybeArray<T> = Array<T> | T

export type DeltaVmExport = {|
  streams: $Dict<() => Promise<AugmentedReadable>>,
  vbds: { [ref: string]: Object },
  vdis: {
    [ref: string]: {
      $SR$uuid: string,
      snapshot_of: string,
    }
  },
  version: '1.0.0',
  vifs: { [ref: string]: Object },
  vm: Vm,
|}

export type DeltaVmImport = {|
  ...DeltaVmExport,
  streams: $Dict<MaybeArray<AugmentedReadable | (() => Promise<AugmentedReadable>)>>,
|}

declare class XapiObject {
  $id: string;
  $ref: string;
  $type: string;
}

type Id = string | XapiObject
declare export class Vm extends XapiObject {
  $snapshots: Vm[];
  name_label: string;
  other_config: $Dict<string>;
  snapshot_time: number;
  uuid: string;
}

declare export class Xapi {
  objects: { all: $Dict<Object> };

  _assertHealthyVdiChains(vm: Vm): void;
  _importVm(
    cancelToken: mixed,
    stream: AugmentedReadable,
    sr?: XapiObject,
    onVmCreation?: (XapiObject) => any
  ): Promise<string>;
  _updateObjectMapProperty(
    object: XapiObject,
    property: string,
    entries: $Dict<string>
  ): Promise<void>;
  _setObjectProperties(
    object: XapiObject,
    properties: $Dict<mixed>
  ): Promise<void>;
  _snapshotVm(cancelToken: mixed, vm: Vm, nameLabel?: string): Promise<Vm>;

  addTag(object: Id, tag: string): Promise<void>;
  barrier(): void;
  barrier(ref: string): XapiObject;
  deleteVm(vm: Id): Promise<void>;
  editVm(vm: Id, $Dict<mixed>): Promise<void>;
  exportDeltaVm(
    cancelToken: mixed,
    snapshot: Id,
    baseSnapshot?: Id
  ): Promise<DeltaVmExport>;
  exportVm(
    cancelToken: mixed,
    vm: Vm,
    options?: Object
  ): Promise<AugmentedReadable>;
  getObject(object: Id): XapiObject;
  importDeltaVm(data: DeltaVmImport, options: Object): Promise<{ vm: Vm }>;
  importVm(stream: AugmentedReadable, options: Object): Promise<Vm>;
}
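The `streams` dictionary above maps `<vdiRef>.vhd` keys to thunks, so a VDI export only starts when a consumer asks for it; `DeltaVmImport` additionally accepts plain streams and arrays of them. A sketch of a consumer normalizing either form (hypothetical helper, mirroring the `ensureArray` loop in `importDeltaVm`):

// entry: a stream, a () => Promise<stream> thunk, or an array mixing both
async function resolveStreams (entry) {
  const items = Array.isArray(entry) ? entry : [entry]
  return Promise.all(
    items.map(item => (typeof item === 'function' ? item() : item))
  )
}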
@@ -11,7 +11,6 @@ import unzip from 'julien-f-unzip'

import { debounce } from '../../decorators'
import {
  createRawObject,
  ensureArray,
  forEach,
  mapFilter,
@@ -35,7 +34,7 @@ export default {

    const data = parseXml(await readAll()).patchdata

    const patches = createRawObject()
    const patches = { __proto__: null }
    forEach(data.patches.patch, patch => {
      patches[patch.uuid] = {
        date: patch.timestamp,
@@ -65,7 +64,7 @@ export default {
    })

    const resolveVersionPatches = function (uuids) {
      const versionPatches = createRawObject()
      const versionPatches = { __proto__: null }

      forEach(ensureArray(uuids), ({ uuid }) => {
        versionPatches[uuid] = patches[uuid]
@@ -74,7 +73,7 @@ export default {
      return versionPatches
    }

    const versions = createRawObject()
    const versions = { __proto__: null }
    let latestVersion
    forEach(data.serverversions.version, version => {
      versions[version.value] = {
@@ -112,7 +111,7 @@ export default {
  },

  _getInstalledPoolPatchesOnHost (host) {
    const installed = createRawObject()
    const installed = { __proto__: null }

    // platform_version < 2.1.1
    forEach(host.$patches, hostPatch => {
@@ -131,7 +130,7 @@ export default {
    const all = await this._getPoolPatchesForHost(host)
    const installed = this._getInstalledPoolPatchesOnHost(host)

    const installable = createRawObject()
    const installable = { __proto__: null }
    forEach(all, (patch, uuid) => {
      if (installed[uuid]) {
        return
@@ -1,6 +1,6 @@
import { forEach, groupBy } from 'lodash'

import { createRawObject, mapToArray } from '../../utils'
import { mapToArray } from '../../utils'

export default {
  _connectAllSrPbds (sr) {
@@ -67,9 +67,9 @@ export default {

  getUnhealthyVdiChainsLength (sr) {
    const vdis = this.getObject(sr).$VDIs
    const unhealthyVdis = createRawObject()
    const unhealthyVdis = { __proto__: null }
    const children = groupBy(vdis, 'sm_config.vhd-parent')
    const cache = createRawObject()
    const cache = { __proto__: null }
    forEach(vdis, vdi => {
      if (vdi.managed && !vdi.is_a_snapshot) {
        const { uuid } = vdi
@@ -9,7 +9,6 @@ import { satisfies as versionSatisfies } from 'semver'

import {
  camelToSnakeCase,
  createRawObject,
  forEach,
  isArray,
  isBoolean,
@@ -77,7 +76,7 @@ export const extractOpaqueRef = str => {

// -------------------------------------------------------------------

const TYPE_TO_NAMESPACE = createRawObject()
const TYPE_TO_NAMESPACE = { __proto__: null }
forEach(
  [
    'Bond',
@@ -116,7 +115,7 @@ export const getNamespaceForType = type => TYPE_TO_NAMESPACE[type] || type
// -------------------------------------------------------------------

export const getVmDisks = vm => {
  const disks = createRawObject(null)
  const disks = { __proto__: null }
  forEach(vm.$VBDs, vbd => {
    let vdi
    if (
@@ -3,7 +3,6 @@ import { forEach, includes, map } from 'lodash'

import { ModelAlreadyExists } from '../collection'
import { Acls } from '../models/acl'
import { createRawObject } from '../utils'

// ===================================================================

@@ -86,10 +85,10 @@ export default class {
      this._getPermissionsByRole(),
    ])

    const permissions = createRawObject()
    const permissions = { __proto__: null }
    for (const { action, object: objectId } of acls) {
      const current =
        permissions[objectId] || (permissions[objectId] = createRawObject())
        permissions[objectId] || (permissions[objectId] = { __proto__: null })

      const permissionsForRole = permissionsByRole[action]
      if (permissionsForRole) {
@@ -128,7 +127,7 @@ export default class {
  async _getPermissionsByRole () {
    const roles = await this.getRoles()

    const permissions = createRawObject()
    const permissions = { __proto__: null }
    for (const role of roles) {
      permissions[role.id] = role.permissions
    }

@@ -6,7 +6,7 @@ import { forEach, isArray, isFunction, map, mapValues } from 'lodash'

import * as methods from '../api'
import { MethodNotFound } from 'json-rpc-peer'
import { createRawObject, noop, serializeError } from '../utils'
import { noop, serializeError } from '../utils'

import * as errors from 'xo-common/api-errors'

@@ -164,7 +164,7 @@ const removeSensitiveParams = (value, name) => {
export default class Api {
  constructor (xo) {
    this._logger = null
    this._methods = createRawObject()
    this._methods = { __proto__: null }
    this._xo = xo

    this.addApiMethods(methods)
@@ -3,7 +3,7 @@ import { noSuchObject } from 'xo-common/api-errors'
import { ignoreErrors } from 'promise-toolbox'

import Token, { Tokens } from '../models/token'
import { createRawObject, forEach, generateToken } from '../utils'
import { forEach, generateToken } from '../utils'

// ===================================================================

@@ -17,7 +17,7 @@ export default class {

    // Store last failures by user to throttle tries (slow bruteforce
    // attacks).
    this._failures = createRawObject()
    this._failures = { __proto__: null }

    this._providers = new Set()

@@ -154,7 +154,8 @@ export default class {
      id: await generateToken(),
      user_id: userId,
      expiration:
        Date.now() + (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
        Date.now() +
        (typeof expiresIn === 'string' ? ms(expiresIn) : expiresIn),
    })

    await this._tokens.add(token)
1106
packages/xo-server/src/xo-mixins/backups-ng/index.js
Normal file
File diff suppressed because it is too large
145
packages/xo-server/src/xo-mixins/backups-ng/migration.js
Normal file
@@ -0,0 +1,145 @@
// @flow

import assert from 'assert'

import { type BackupJob } from '../backups-ng'
import { type CallJob } from '../jobs'
import { type Schedule } from '../scheduling'

const createOr = (children: Array<any>): any =>
  children.length === 1 ? children[0] : { __or: children }

const methods = {
  'vm.deltaCopy': (
    job: CallJob,
    { retention = 1, sr, vms },
    schedule: Schedule
  ) => ({
    mode: 'delta',
    settings: {
      [schedule.id]: {
        exportRetention: retention,
        vmTimeout: job.timeout,
      },
    },
    srs: { id: sr },
    userId: job.userId,
    vms,
  }),
  'vm.rollingDeltaBackup': (
    job: CallJob,
    { depth = 1, retention = depth, remote, vms },
    schedule: Schedule
  ) => ({
    mode: 'delta',
    remotes: { id: remote },
    settings: {
      [schedule.id]: {
        exportRetention: retention,
        vmTimeout: job.timeout,
      },
    },
    vms,
  }),
  'vm.rollingDrCopy': (
    job: CallJob,
    { deleteOldBackupsFirst, depth = 1, retention = depth, sr, vms },
    schedule: Schedule
  ) => ({
    mode: 'full',
    settings: {
      [schedule.id]: {
        deleteFirst: deleteOldBackupsFirst,
        exportRetention: retention,
        vmTimeout: job.timeout,
      },
    },
    srs: { id: sr },
    vms,
  }),
  'vm.rollingBackup': (
    job: CallJob,
    { compress, depth = 1, retention = depth, remoteId, vms },
    schedule: Schedule
  ) => ({
    compression: compress ? 'native' : undefined,
    mode: 'full',
    remotes: { id: remoteId },
    settings: {
      [schedule.id]: {
        exportRetention: retention,
        vmTimeout: job.timeout,
      },
    },
    vms,
  }),
  'vm.rollingSnapshot': (
    job: CallJob,
    { depth = 1, retention = depth, vms },
    schedule: Schedule
  ) => ({
    mode: 'full',
    settings: {
      [schedule.id]: {
        snapshotRetention: retention,
        vmTimeout: job.timeout,
      },
    },
    vms,
  }),
}

const parseParamsVector = (vector: any) => {
  assert.strictEqual(vector.type, 'crossProduct')
  const { items } = vector
  assert.strictEqual(items.length, 2)

  let vms, params
  if (items[1].type === 'map') {
    ;[params, vms] = items

    vms = vms.collection
    assert.strictEqual(vms.type, 'fetchObjects')
    vms = vms.pattern
  } else {
    ;[vms, params] = items

    assert.strictEqual(vms.type, 'set')
    vms = vms.values
    if (vms.length !== 0) {
      assert.deepStrictEqual(Object.keys(vms[0]), ['id'])
      vms = { id: createOr(vms.map(_ => _.id)) }
    }
  }

  assert.strictEqual(params.type, 'set')
  params = params.values
  assert.strictEqual(params.length, 1)
  params = params[0]

  return { ...params, vms }
}

export const translateLegacyJob = (
  job: CallJob,
  schedules: Schedule[]
): BackupJob => {
  const { id } = job
  let method, schedule
  if (
    job.type !== 'call' ||
    (method = methods[job.method]) === undefined ||
    (schedule = schedules.find(_ => _.jobId === id)) === undefined
  ) {
    throw new Error(`cannot convert job ${job.id}`)
  }
  const params = parseParamsVector(job.paramsVector)
  return {
    id,
    name: params.tag || job.name,
    type: 'backup',
    userId: job.userId,
    // $FlowFixMe `method` is initialized but Flow fails to see this
    ...method(job, params, schedule),
  }
}
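A worked example of the translation above (all identifiers and values are made up for illustration; a real Schedule carries more fields):

// A legacy 'vm.rollingSnapshot' call job and its schedule:
const legacyJob = {
  id: 'job1',
  type: 'call',
  method: 'vm.rollingSnapshot',
  userId: 'admin',
  timeout: 3600e3,
  paramsVector: {
    type: 'crossProduct',
    items: [
      { type: 'set', values: [{ id: 'vm:1' }, { id: 'vm:2' }] },
      { type: 'set', values: [{ depth: 7, tag: 'nightly' }] },
    ],
  },
}

translateLegacyJob(legacyJob, [{ id: 'schedule1', jobId: 'job1' }])
// → {
//     id: 'job1',
//     name: 'nightly',
//     type: 'backup',
//     userId: 'admin',
//     mode: 'full',
//     settings: { schedule1: { snapshotRetention: 7, vmTimeout: 3600000 } },
//     vms: { id: { __or: ['vm:1', 'vm:2'] } },
//   }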
@@ -1,9 +1,8 @@
import deferrable from 'golike-defer'
import escapeStringRegexp from 'escape-string-regexp'
import eventToPromise from 'event-to-promise'
import execa from 'execa'
import splitLines from 'split-lines'
import { CancelToken, ignoreErrors } from 'promise-toolbox'
import { CancelToken, fromEvent, ignoreErrors } from 'promise-toolbox'
import { createParser as createPairsParser } from 'parse-pairs'
import { createReadStream, readdir, stat } from 'fs'
import { satisfies as versionSatisfies } from 'semver'
@@ -102,30 +101,6 @@ const getDeltaBackupNameWithoutExt = name =>
  name.slice(0, -DELTA_BACKUP_EXT_LENGTH)
const isDeltaBackup = name => endsWith(name, DELTA_BACKUP_EXT)

// Checksums have been corrupted between 5.2.6 and 5.2.7.
//
// For a short period of time, bad checksums will be regenerated
// instead of rejected.
//
// TODO: restore when enough time has passed (a week/a month).
async function checkFileIntegrity (handler, name) {
  await handler.refreshChecksum(name)
  // let stream
  //
  // try {
  //   stream = await handler.createReadStream(name, { checksum: true })
  // } catch (error) {
  //   if (error.code === 'ENOENT') {
  //     return
  //   }
  //
  //   throw error
  // }
  //
  // stream.resume()
  // await eventToPromise(stream, 'finish')
}

// -------------------------------------------------------------------

const listPartitions = (() => {
@@ -429,8 +404,7 @@ export default class {
    })(srcVm.other_config[TAG_LAST_BASE_DELTA])

    // 2. Copy.
    let size = 0
    const dstVm = await (async () => {
    const { transferSize, vm: dstVm } = await (async () => {
      const { cancel, token } = CancelToken.source()
      const delta = await srcXapi.exportDeltaVm(
        token,
@@ -452,17 +426,6 @@ export default class {
      delta.vm.other_config[TAG_EXPORT_TIME] = date
      delta.vm.tags = [...delta.vm.tags, 'Continuous Replication']

      const { streams } = delta
      forEach(delta.vdis, (vdi, key) => {
        const id = `${key}.vhd`
        const stream = streams[id]
        const sizeStream = createSizeStream().once('finish', () => {
          size += sizeStream.size
        })
        sizeStream.task = stream.task
        streams[id] = stream.pipe(sizeStream)
      })

      let toRemove = filter(
        targetXapi.objects.all,
        obj => obj.$type === 'vm' && obj.other_config[TAG_SOURCE_VM] === uuid
@@ -508,7 +471,7 @@ export default class {
      // 5. Return the identifier of the new XO VM object.
      id: xapiObjectToXo(dstVm).id,
      transferDuration: Date.now() - transferStart,
      transferSize: size,
      transferSize,
    }
  }

@@ -558,15 +521,7 @@ export default class {
    const backups = await this._listVdiBackups(handler, dir)
    for (let i = 1; i < backups.length; i++) {
      const childPath = dir + '/' + backups[i]
      const modified = await chainVhd(
        handler,
        dir + '/' + backups[i - 1],
        handler,
        childPath
      )
      if (modified) {
        await handler.refreshChecksum(childPath)
      }
      await chainVhd(handler, dir + '/' + backups[i - 1], handler, childPath)
    }
  }

@@ -582,8 +537,6 @@ export default class {
      const timestamp = getVdiTimestamp(backups[i])
      const newFullBackup = `${dir}/${timestamp}_full.vhd`

      await checkFileIntegrity(handler, `${dir}/${backups[i]}`)

      let j = i
      for (; j > 0 && isDeltaVdiBackup(backups[j]); j--);
      const fullBackupId = j
@@ -598,7 +551,6 @@ export default class {
      const backup = `${dir}/${backups[j]}`

      try {
        await checkFileIntegrity(handler, backup)
        mergedDataSize += await vhdMerge(handler, parent, handler, backup)
      } catch (e) {
        console.error('Unable to use vhd-util.', e)
@@ -652,6 +604,10 @@ export default class {
    xapi,
    { vdiParent, isFull, handler, stream, dir, retention }
  ) {
    if (typeof stream === 'function') {
      stream = await stream()
    }

    const backupDirectory = `vdi_${vdiParent.uuid}`
    dir = `${dir}/${backupDirectory}`

@@ -675,18 +631,12 @@ export default class {
    const sizeStream = createSizeStream()

    try {
      const targetStream = await handler.createOutputStream(backupFullPath, {
        // FIXME: Checksum is not computed for full vdi backups.
        // The problem is in the merge case, a delta merged in a full vdi
        // backup forces us to browse the resulting file =>
        // Significant transfer time on the network !
        checksum: !isFull,
      })
      const targetStream = await handler.createOutputStream(backupFullPath)

      stream.on('error', error => targetStream.emit('error', error))

      await Promise.all([
        eventToPromise(stream.pipe(sizeStream).pipe(targetStream), 'finish'),
        fromEvent(stream.pipe(sizeStream).pipe(targetStream), 'finish'),
        stream.task,
      ])
    } catch (error) {
@@ -898,10 +848,7 @@ export default class {

        streams[`${id}.vhd`] = await Promise.all(
          mapToArray(backups, async backup =>
            handler.createReadStream(`${vdisFolder}/${backup}`, {
              checksum: true,
              ignoreMissingChecksum: true,
            })
            handler.createReadStream(`${vdisFolder}/${backup}`)
          )
        )
      })
@@ -910,11 +857,11 @@ export default class {
      delta.vm.name_label += ` (${shortDate(datetime * 1e3)})`
      delta.vm.tags.push('restored from backup')

      vm = await xapi.importDeltaVm(delta, {
      vm = (await xapi.importDeltaVm(delta, {
        disableStartAfterImport: false,
        srId: sr !== undefined && sr._xapiId,
        mapVdisSrs,
      })
      })).vm
    } else {
      throw new Error(`Unsupported delta backup version: ${version}`)
    }
@@ -935,8 +882,6 @@ export default class {
    $defer.onFailure.call(handler, 'unlink', file)
    $defer.onFailure.call(targetStream, 'close')

    const promise = eventToPromise(targetStream, 'finish')

    const sourceStream = await this._xo.getXapi(vm).exportVm(vm._xapiId, {
      compress,
    })
@@ -945,7 +890,7 @@ export default class {

    sourceStream.pipe(sizeStream).pipe(targetStream)

    await promise
    await Promise.all([sourceStream.task, fromEvent(targetStream, 'finish')])

    return {
      transferSize: sizeStream.size,
@@ -1,87 +0,0 @@
import { assign } from 'lodash'
import { lastly } from 'promise-toolbox'
import { noSuchObject } from 'xo-common/api-errors'

import JobExecutor from '../job-executor'
import { Jobs as JobsDb } from '../models/job'
import { mapToArray } from '../utils'

// ===================================================================

export default class Jobs {
  constructor (xo) {
    this._executor = new JobExecutor(xo)
    const jobsDb = (this._jobs = new JobsDb({
      connection: xo._redis,
      prefix: 'xo:job',
      indexes: ['user_id', 'key'],
    }))
    this._runningJobs = Object.create(null)

    xo.on('clean', () => jobsDb.rebuildIndexes())
    xo.on('start', () => {
      xo.addConfigManager(
        'jobs',
        () => jobsDb.get(),
        jobs => Promise.all(mapToArray(jobs, job => jobsDb.save(job))),
        ['users']
      )
    })
  }

  async getAllJobs () {
    return /* await */ this._jobs.get()
  }

  async getJob (id) {
    const job = await this._jobs.first(id)
    if (!job) {
      throw noSuchObject(id, 'job')
    }

    return job.properties
  }

  async createJob (job) {
    // TODO: use plain objects
    const job_ = await this._jobs.create(job)
    return job_.properties
  }

  async updateJob ({ id, ...props }) {
    const job = await this.getJob(id)

    assign(job, props)
    if (job.timeout === null) {
      delete job.timeout
    }

    return /* await */ this._jobs.save(job)
  }

  async removeJob (id) {
    return /* await */ this._jobs.remove(id)
  }

  _runJob (job) {
    const { id } = job
    const runningJobs = this._runningJobs
    if (runningJobs[id]) {
      throw new Error(`job ${id} is already running`)
    }
    runningJobs[id] = true
    return this._executor.exec(job)::lastly(() => {
      delete runningJobs[id]
    })
  }

  async runJobSequence (idSequence) {
    const jobs = await Promise.all(
      mapToArray(idSequence, id => this.getJob(id))
    )

    for (const job of jobs) {
      await this._runJob(job)
    }
  }
}
124
packages/xo-server/src/xo-mixins/jobs/execute-call.js
Normal file
@@ -0,0 +1,124 @@
import { createPredicate } from 'value-matcher'
|
||||
import { timeout } from 'promise-toolbox'
|
||||
import { assign, filter, isEmpty, map, mapValues } from 'lodash'
|
||||
|
||||
import { crossProduct } from '../../math'
|
||||
import { asyncMap, serializeError, thunkToArray } from '../../utils'
|
||||
|
||||
// ===================================================================
|
||||
|
||||
const paramsVectorActionsMap = {
|
||||
extractProperties ({ mapping, value }) {
|
||||
return mapValues(mapping, key => value[key])
|
||||
},
|
||||
crossProduct ({ items }) {
|
||||
return thunkToArray(
|
||||
crossProduct(map(items, value => resolveParamsVector.call(this, value)))
|
||||
)
|
||||
},
|
||||
fetchObjects ({ pattern }) {
|
||||
const objects = filter(this.getObjects(), createPredicate(pattern))
|
||||
if (isEmpty(objects)) {
|
||||
throw new Error('no objects match this pattern')
|
||||
}
|
||||
return objects
|
||||
},
|
||||
map ({ collection, iteratee, paramName = 'value' }) {
|
||||
return map(resolveParamsVector.call(this, collection), value => {
|
||||
return resolveParamsVector.call(this, {
|
||||
...iteratee,
|
||||
[paramName]: value,
|
||||
})
|
||||
})
|
||||
},
|
||||
set: ({ values }) => values,
|
||||
}
|
||||
|
||||
export function resolveParamsVector (paramsVector) {
|
||||
const visitor = paramsVectorActionsMap[paramsVector.type]
|
||||
if (!visitor) {
|
||||
throw new Error(`Unsupported function '${paramsVector.type}'.`)
|
||||
}
|
||||
|
||||
return visitor.call(this, paramsVector)
|
||||
}
|
||||
|
||||
// ===================================================================
|
||||
|
||||
export default async function executeJobCall ({
|
||||
app,
|
||||
job,
|
||||
logger,
|
||||
runJobId,
|
||||
schedule,
|
||||
session,
|
||||
}) {
|
||||
const { paramsVector } = job
|
||||
const paramsFlatVector = paramsVector
|
||||
? resolveParamsVector.call(app, paramsVector)
|
||||
: [{}] // One call with no parameters
|
||||
|
||||
const execStatus = {
|
||||
calls: {},
|
||||
runJobId,
|
||||
start: Date.now(),
|
||||
timezone: schedule !== undefined ? schedule.timezone : undefined,
|
||||
}
|
||||
|
||||
await asyncMap(paramsFlatVector, params => {
|
||||
const runCallId = logger.notice(
|
||||
`Starting ${job.method} call. (${job.id})`,
|
||||
{
|
||||
event: 'jobCall.start',
|
||||
runJobId,
|
||||
method: job.method,
|
||||
params,
|
||||
}
|
||||
)
|
||||
|
||||
const call = (execStatus.calls[runCallId] = {
|
||||
method: job.method,
|
||||
params,
|
||||
start: Date.now(),
|
||||
})
|
||||
let promise = app.callApiMethod(session, job.method, assign({}, params))
|
||||
if (job.timeout) {
|
||||
promise = promise::timeout(job.timeout)
|
||||
}
|
||||
|
||||
return promise.then(
|
||||
value => {
|
||||
logger.notice(
|
||||
`Call ${job.method} (${runCallId}) is a success. (${job.id})`,
|
||||
{
|
||||
event: 'jobCall.end',
|
||||
runJobId,
|
||||
runCallId,
|
||||
returnedValue: value,
|
||||
}
|
||||
)
|
||||
|
||||
call.returnedValue = value
|
||||
call.end = Date.now()
|
||||
},
|
||||
reason => {
|
||||
logger.notice(
|
||||
`Call ${job.method} (${runCallId}) has failed. (${job.id})`,
|
||||
{
|
||||
event: 'jobCall.end',
|
||||
runJobId,
|
||||
runCallId,
|
||||
error: serializeError(reason),
|
||||
}
|
||||
)
|
||||
|
||||
call.error = reason
|
||||
call.end = Date.now()
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
execStatus.end = Date.now()
|
||||
|
||||
return execStatus
|
||||
}
|
||||
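The shape of the flattened vector is easiest to see on an example. The following sketch is illustrative only (the vector and its values are made up), and it assumes that crossProduct from '../../math' merges the objects of each combination, as its use for building API parameters suggests:

// A hypothetical paramsVector: the cross product of two 'set' vectors.
const paramsVector = {
  type: 'crossProduct',
  items: [
    { type: 'set', values: [{ vm: 'vm:1' }, { vm: 'vm:2' }] },
    { type: 'set', values: [{ force: true }] },
  ],
}

// resolveParamsVector.call(app, paramsVector) would then return one params
// object per combination, so executeJobCall would make two API calls:
// [ { vm: 'vm:1', force: true }, { vm: 'vm:2', force: true } ]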
@@ -1,7 +1,7 @@
 /* eslint-env jest */

 import { forEach } from 'lodash'
-import { resolveParamsVector } from './job-executor'
+import { resolveParamsVector } from './execute-call'

 describe('resolveParamsVector', function () {
   forEach(
@@ -68,37 +68,35 @@ describe('resolveParamsVector', function () {

       // Context.
       {
-        xo: {
-          getObjects: function () {
-            return [
-              {
-                id: 'vm:1',
-                $pool: 'pool:1',
-                tags: [],
-                type: 'VM',
-                power_state: 'Halted',
-              },
-              {
-                id: 'vm:2',
-                $pool: 'pool:1',
-                tags: ['foo'],
-                type: 'VM',
-                power_state: 'Running',
-              },
-              {
-                id: 'host:1',
-                type: 'host',
-                power_state: 'Running',
-              },
-              {
-                id: 'vm:3',
-                $pool: 'pool:8',
-                tags: ['foo'],
-                type: 'VM',
-                power_state: 'Halted',
-              },
-            ]
-          },
-        },
+        getObjects: function () {
+          return [
+            {
+              id: 'vm:1',
+              $pool: 'pool:1',
+              tags: [],
+              type: 'VM',
+              power_state: 'Halted',
+            },
+            {
+              id: 'vm:2',
+              $pool: 'pool:1',
+              tags: ['foo'],
+              type: 'VM',
+              power_state: 'Running',
+            },
+            {
+              id: 'host:1',
+              type: 'host',
+              power_state: 'Running',
+            },
+            {
+              id: 'vm:3',
+              $pool: 'pool:8',
+              tags: ['foo'],
+              type: 'VM',
+              power_state: 'Halted',
+            },
+          ]
+        },
       },
     ],
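For reference, with the test context above a fetchObjects vector is filtered through value-matcher's createPredicate. A sketch (the pattern itself is made up, but the objects are the ones from the test fixture):

// Only vm:2 has both type 'VM' and power_state 'Running'.
resolveParamsVector.call(context, {
  type: 'fetchObjects',
  pattern: { type: 'VM', power_state: 'Running' },
})
// → [ { id: 'vm:2', $pool: 'pool:1', tags: ['foo'], type: 'VM',
//       power_state: 'Running' } ]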
278 packages/xo-server/src/xo-mixins/jobs/index.js Normal file
@@ -0,0 +1,278 @@
// @flow

import type { Pattern } from 'value-matcher'

import { cancelable } from 'promise-toolbox'
import { map as mapToArray } from 'lodash'
import { noSuchObject } from 'xo-common/api-errors'

import Collection from '../../collection/redis'
import patch from '../../patch'
import { serializeError } from '../../utils'

import type Logger from '../logs/loggers/abstract'
import { type Schedule } from '../scheduling'

import executeCall from './execute-call'

// ===================================================================

export type Job = {
  id: string,
  name: string,
  type: string,
  userId: string,
}

type ParamsVector =
  | {|
      items: Array<Object>,
      type: 'crossProduct',
    |}
  | {|
      mapping: Object,
      type: 'extractProperties',
      value: Object,
    |}
  | {|
      pattern: Pattern,
      type: 'fetchObjects',
    |}
  | {|
      collection: Object,
      iteratee: Function,
      paramName?: string,
      type: 'map',
    |}
  | {|
      type: 'set',
      values: any,
    |}

export type CallJob = {|
  ...$Exact<Job>,
  method: string,
  paramsVector: ParamsVector,
  timeout?: number,
  type: 'call',
|}

export type Executor = ({|
  app: Object,
  cancelToken: any,
  job: Job,
  logger: Logger,
  runJobId: string,
  schedule?: Schedule,
  session: Object,
|}) => Promise<any>

// -----------------------------------------------------------------------------

const normalize = job => {
  Object.keys(job).forEach(key => {
    try {
      const value = (job[key] = JSON.parse(job[key]))

      // userId is always a string, even when the value is numeric, which
      // might lead to it being parsed as a number.
      //
      // The issue has been introduced by
      // 48b2297bc151df582160be7c1bf1e8ee160320b8.
      if (key === 'userId' && typeof value === 'number') {
        job[key] = String(value)
      }
    } catch (_) {}
  })
  return job
}

const serialize = (job: {| [string]: any |}) => {
  Object.keys(job).forEach(key => {
    const value = job[key]
    if (typeof value !== 'string') {
      job[key] = JSON.stringify(job[key])
    }
  })
  return job
}

class JobsDb extends Collection {
  async create (job): Promise<Job> {
    return normalize((await this.add(serialize((job: any)))).properties)
  }

  async save (job): Promise<void> {
    await this.update(serialize((job: any)))
  }

  async get (properties): Promise<Array<Job>> {
    const jobs = await super.get(properties)
    jobs.forEach(normalize)
    return jobs
  }
}

// -----------------------------------------------------------------------------

export default class Jobs {
  _app: any
  _executors: { __proto__: null, [string]: Executor }
  _jobs: JobsDb
  _logger: Logger
  _runningJobs: { __proto__: null, [string]: boolean }

  constructor (xo: any) {
    this._app = xo
    const executors = (this._executors = { __proto__: null })
    const jobsDb = (this._jobs = new JobsDb({
      connection: xo._redis,
      prefix: 'xo:job',
      indexes: ['user_id', 'key'],
    }))
    this._logger = undefined
    this._runningJobs = { __proto__: null }

    executors.call = executeCall

    xo.on('clean', () => jobsDb.rebuildIndexes())
    xo.on('start', () => {
      xo.addConfigManager(
        'jobs',
        () => jobsDb.get(),
        jobs => Promise.all(mapToArray(jobs, job => jobsDb.save(job))),
        ['users']
      )

      xo.getLogger('jobs').then(logger => {
        this._logger = logger
      })
    })
  }

  async getAllJobs (type?: string): Promise<Array<Job>> {
    // $FlowFixMe don't know what the problem is (JFT)
    const jobs = await this._jobs.get()
    const runningJobs = this._runningJobs
    const result = []
    jobs.forEach(job => {
      if (type === undefined || job.type === type) {
        job.runId = runningJobs[job.id]
        result.push(job)
      }
    })
    return result
  }

  async getJob (id: string, type?: string): Promise<Job> {
    let job = await this._jobs.first(id)
    if (job === null || (type !== undefined && job.properties.type !== type)) {
      throw noSuchObject(id, 'job')
    }

    job = job.properties
    job.runId = this._runningJobs[id]

    return job
  }

  createJob (job: $Diff<Job, {| id: string |}>): Promise<Job> {
    return this._jobs.create(job)
  }

  async updateJob (job: $Shape<Job>, merge: boolean = true) {
    if (merge) {
      const { id, ...props } = job
      job = await this.getJob(id)
      patch(job, props)
    }
    return /* await */ this._jobs.save(job)
  }

  registerJobExecutor (type: string, executor: Executor): void {
    const executors = this._executors
    if (type in executors) {
      throw new Error(`there is already a job executor for type ${type}`)
    }
    executors[type] = executor
  }

  async removeJob (id: string) {
    return /* await */ this._jobs.remove(id)
  }

  async _runJob (cancelToken: any, job: Job, schedule?: Schedule) {
    const { id } = job

    const runningJobs = this._runningJobs
    if (id in runningJobs) {
      throw new Error(`job ${id} is already running`)
    }

    const executor = this._executors[job.type]
    if (executor === undefined) {
      throw new Error(`cannot run job ${id}: no executor for type ${job.type}`)
    }

    const logger = this._logger
    const runJobId = logger.notice(`Starting execution of ${id}.`, {
      event: 'job.start',
      userId: job.userId,
      jobId: id,
      // $FlowFixMe only defined for CallJob
      key: job.key,
    })

    runningJobs[id] = runJobId

    try {
      const app = this._app
      const session = app.createUserConnection()
      session.set('user_id', job.userId)

      const status = await executor({
        app,
        cancelToken,
        job,
        logger,
        runJobId,
        schedule,
        session,
      })
      logger.notice(`Execution terminated for ${job.id}.`, {
        event: 'job.end',
        runJobId,
      })

      session.close()
      app.emit('job:terminated', status)
    } catch (error) {
      logger.error(`The execution of ${id} has failed.`, {
        event: 'job.end',
        runJobId,
        error: serializeError(error),
      })
      throw error
    } finally {
      delete runningJobs[id]
    }
  }

  @cancelable
  async runJobSequence (
    $cancelToken: any,
    idSequence: Array<string>,
    schedule?: Schedule
  ) {
    const jobs = await Promise.all(
      mapToArray(idSequence, id => this.getJob(id))
    )

    for (const job of jobs) {
      if ($cancelToken.requested) {
        break
      }
      await this._runJob($cancelToken, job, schedule)
    }
  }
}
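A usage sketch for this mixin, with made-up IDs, assuming its methods are reachable on the application object (here called xo). Since runJobSequence is decorated with promise-toolbox's @cancelable, a CancelToken can, if I read that library right, be passed as the first argument:

import { CancelToken } from 'promise-toolbox'

// A 'call' job invokes one API method once per entry of its resolved
// paramsVector (the 'call' executor is registered in the constructor above).
const job = await xo.createJob({
  name: 'restart my VM',
  type: 'call',
  userId: 'some-user-id',
  method: 'vm.restart',
  paramsVector: { type: 'set', values: [{ id: 'vm:1' }] },
})

const { cancel, token } = CancelToken.source()
await xo.runJobSequence(token, [job.id]) // cancel() stops between jobs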
@@ -2,7 +2,7 @@ import Ajv from 'ajv'

 import { PluginsMetadata } from '../models/plugin-metadata'
 import { invalidParameters, noSuchObject } from 'xo-common/api-errors'
-import { createRawObject, isFunction, mapToArray } from '../utils'
+import { isFunction, mapToArray } from '../utils'

 // ===================================================================

@@ -11,7 +11,7 @@ export default class {
     this._ajv = new Ajv({
       useDefaults: true,
     })
-    this._plugins = createRawObject()
+    this._plugins = { __proto__: null }

     this._pluginsMetadata = new PluginsMetadata({
       connection: xo._redis,
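Side note on this replacement: a null-prototype object literal behaves like createRawObject() did. It inherits nothing, so arbitrary plugin names cannot collide with Object.prototype members:

const plugins = { __proto__: null }
console.log('toString' in plugins) // false
console.log('toString' in {})      // true, illustrating the collision risk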
@@ -1,9 +1,7 @@
 import { noSuchObject } from 'xo-common/api-errors'

-import RemoteHandlerLocal from '../remote-handlers/local'
-import RemoteHandlerNfs from '../remote-handlers/nfs'
-import RemoteHandlerSmb from '../remote-handlers/smb'
 import { forEach, mapToArray } from '../utils'
+import { getHandler } from '../remote-handlers'
 import { Remotes } from '../models/remote'

 // ===================================================================
@@ -40,20 +38,7 @@ export default class {
       throw new Error('remote is disabled')
     }

-    const HANDLERS = {
-      file: RemoteHandlerLocal,
-      smb: RemoteHandlerSmb,
-      nfs: RemoteHandlerNfs,
-    }
-
-    // FIXME: should be done in xo-remote-parser.
-    const type = remote.url.split('://')[0]
-
-    const Handler = HANDLERS[type]
-    if (!Handler) {
-      throw new Error('Unhandled remote type')
-    }
-    return new Handler(remote)
+    return getHandler(remote)
   }

   async testRemote (remote) {
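The registry itself is not part of this diff; presumably '../remote-handlers' centralizes the very mapping deleted here, along the lines of this sketch (names inferred from the removed code, not confirmed by the diff):

// Hypothetical shape of '../remote-handlers'.
import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'

const HANDLERS = {
  file: RemoteHandlerLocal,
  nfs: RemoteHandlerNfs,
  smb: RemoteHandlerSmb,
}

export const getHandler = remote => {
  const Handler = HANDLERS[remote.url.split('://')[0]]
  if (Handler === undefined) {
    throw new Error('Unhandled remote type')
  }
  return new Handler(remote)
}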
@@ -335,6 +335,7 @@ export default class {
     let set
     if (
       object.$type !== 'vm' ||
       object.is_a_snapshot ||
       // No set for this VM.
       !(id = xapi.xo.getData(object, 'resourceSet')) ||
       // Not our set.
@@ -1,204 +1,169 @@
import { BaseError } from 'make-error'
// @flow

import { createSchedule } from '@xen-orchestra/cron'
import { noSuchObject } from 'xo-common/api-errors.js'
import { keyBy } from 'lodash'
import { noSuchObject } from 'xo-common/api-errors'

import { Schedules } from '../models/schedule'
import { forEach, mapToArray } from '../utils'
import Collection from '../collection/redis'
import patch from '../patch'
import { asyncMap } from '../utils'

// ===================================================================

export type Schedule = {|
  cron: string,
  enabled: boolean,
  id: string,
  jobId: string,
  name: string,
  timezone?: string,
  userId: string,
|}

const _resolveId = scheduleOrId => scheduleOrId.id || scheduleOrId
const normalize = schedule => {
  const { enabled } = schedule
  if (typeof enabled !== 'boolean') {
    schedule.enabled = enabled === 'true'
  }
  if ('job' in schedule) {
    schedule.jobId = schedule.job
    delete schedule.job
  }
  return schedule
}

export class SchedulerError extends BaseError {}

export class ScheduleOverride extends SchedulerError {
  constructor (scheduleOrId) {
    super('Schedule ID ' + _resolveId(scheduleOrId) + ' is already added')
class Schedules extends Collection {
  async get (properties) {
    const schedules = await super.get(properties)
    schedules.forEach(normalize)
    return schedules
  }
}

export class ScheduleNotEnabled extends SchedulerError {
  constructor (scheduleOrId) {
    super('Schedule ' + _resolveId(scheduleOrId) + ' is not enabled')
  }
}
export default class Scheduling {
  _app: any
  _db: {|
    add: Function,
    first: Function,
    get: Function,
    remove: Function,
    update: Function,
  |}
  _runs: { __proto__: null, [string]: () => void }

export class ScheduleAlreadyEnabled extends SchedulerError {
  constructor (scheduleOrId) {
    super('Schedule ' + _resolveId(scheduleOrId) + ' is already enabled')
  }
}
  constructor (app: any) {
    this._app = app

// ===================================================================

export default class {
  constructor (xo) {
    this.xo = xo
    const schedules = (this._redisSchedules = new Schedules({
      connection: xo._redis,
    const db = (this._db = new Schedules({
      connection: app._redis,
      prefix: 'xo:schedule',
      indexes: ['user_id', 'job'],
    }))
    this._scheduleTable = undefined

    xo.on('clean', () => schedules.rebuildIndexes())
    xo.on('start', () => {
      xo.addConfigManager(
    this._runs = { __proto__: null }

    app.on('clean', async () => {
      const [jobsById, schedules] = await Promise.all([
        app.getAllJobs().then(_ => keyBy(_, 'id')),
        app.getAllSchedules(),
      ])

      await db.remove(
        schedules.filter(_ => !(_.jobId in jobsById)).map(_ => _.id)
      )

      return db.rebuildIndexes()
    })
    app.on('start', async () => {
      app.addConfigManager(
        'schedules',
        () => schedules.get(),
        schedules_ =>
          Promise.all(
            mapToArray(schedules_, schedule => schedules.save(schedule))
          ),
        () => db.get(),
        schedules =>
          asyncMap(schedules, schedule => db.update(normalize(schedule))),
        ['jobs']
      )

      return this._loadSchedules()
      const schedules = await this.getAllSchedules()
      schedules.forEach(schedule => this._start(schedule))
    })
    xo.on('stop', () => this._disableAll())
  }

  _add (schedule) {
    const { id } = schedule
    this._schedules[id] = schedule
    this._scheduleTable[id] = false
    try {
      if (schedule.enabled) {
        this._enable(schedule)
      }
    } catch (error) {
      console.warn('Scheduling#_add(%s)', id, error)
    }
  }

  _exists (scheduleOrId) {
    const id_ = _resolveId(scheduleOrId)
    return id_ in this._schedules
  }

  _isEnabled (scheduleOrId) {
    return this._scheduleTable[_resolveId(scheduleOrId)]
  }

  _enable ({ cron, id, job, timezone = 'local' }) {
    this._cronJobs[id] = createSchedule(cron, timezone).startJob(() =>
      this.xo.runJobSequence([job])
    )
    this._scheduleTable[id] = true
  }

  _disable (scheduleOrId) {
    if (!this._exists(scheduleOrId)) {
      throw noSuchObject(scheduleOrId, 'schedule')
    }
    if (!this._isEnabled(scheduleOrId)) {
      throw new ScheduleNotEnabled(scheduleOrId)
    }
    const id = _resolveId(scheduleOrId)
    this._cronJobs[id]() // Stop cron job.
    delete this._cronJobs[id]
    this._scheduleTable[id] = false
  }

  _disableAll () {
    forEach(this._scheduleTable, (enabled, id) => {
      if (enabled) {
        this._disable(id)
      }
    app.on('stop', () => {
      const runs = this._runs
      Object.keys(runs).forEach(id => {
        runs[id]()
        delete runs[id]
      })
    })
  }

  get scheduleTable () {
    return this._scheduleTable
  }

  async _loadSchedules () {
    this._schedules = {}
    this._scheduleTable = {}
    this._cronJobs = {}

    const schedules = await this.xo.getAllSchedules()

    forEach(schedules, schedule => {
      this._add(schedule)
    })
  }

  async _getSchedule (id) {
    const schedule = await this._redisSchedules.first(id)

    if (!schedule) {
      throw noSuchObject(id, 'schedule')
    }

    return schedule
  }

  async getSchedule (id) {
    return (await this._getSchedule(id)).properties
  }

  async getAllSchedules () {
    return /* await */ this._redisSchedules.get()
  }

  async createSchedule (userId, { job, cron, enabled, name, timezone }) {
    const schedule_ = await this._redisSchedules.create(
      userId,
      job,
  async createSchedule ({
    cron,
    enabled,
    jobId,
    name,
    timezone,
    userId,
  }: $Diff<Schedule, {| id: string |}>) {
    const schedule = (await this._db.add({
      cron,
      enabled,
      jobId,
      name,
      timezone
    )
    const schedule = schedule_.properties

    this._add(schedule)

      timezone,
      userId,
    })).properties
    this._start(schedule)
    return schedule
  }

  async updateSchedule (id, { job, cron, enabled, name, timezone }) {
    const schedule = await this._getSchedule(id)

    if (job !== undefined) schedule.set('job', job)
    if (cron !== undefined) schedule.set('cron', cron)
    if (enabled !== undefined) schedule.set('enabled', enabled)
    if (name !== undefined) schedule.set('name', name)
    if (timezone === null) {
      schedule.set('timezone', undefined) // Remove current timezone
    } else if (timezone !== undefined) {
      schedule.set('timezone', timezone)
    }

    await this._redisSchedules.save(schedule)

    const { properties } = schedule

    if (!this._exists(id)) {
  async getSchedule (id: string): Promise<Schedule> {
    const schedule = await this._db.first(id)
    if (schedule === null) {
      throw noSuchObject(id, 'schedule')
    }

    // disable the schedule, _add() will enable it if necessary
    if (this._isEnabled(id)) {
      this._disable(id)
    }

    this._add(properties)
    return schedule.properties
  }

  async removeSchedule (id) {
    await this._redisSchedules.remove(id)
  async getAllSchedules (): Promise<Array<Schedule>> {
    return this._db.get()
  }

    try {
      this._disable(id)
    } catch (exc) {
      if (!(exc instanceof SchedulerError)) {
        throw exc
      }
    } finally {
      delete this._schedules[id]
      delete this._scheduleTable[id]
  async deleteSchedule (id: string) {
    this._stop(id)
    await this._db.remove(id)
  }

  async updateSchedule ({
    cron,
    enabled,
    id,
    jobId,
    name,
    timezone,
    userId,
  }: $Shape<Schedule>) {
    const schedule = await this.getSchedule(id)
    patch(schedule, { cron, enabled, jobId, name, timezone, userId })

    this._start(schedule)

    await this._db.update(schedule)
  }

  _start (schedule: Schedule) {
    const { id } = schedule

    this._stop(id)

    if (schedule.enabled) {
      this._runs[id] = createSchedule(
        schedule.cron,
        schedule.timezone
      ).startJob(() => this._app.runJobSequence([schedule.jobId], schedule))
    }
  }

  _stop (id: string) {
    const runs = this._runs
    if (id in runs) {
      runs[id]()
      delete runs[id]
    }
  }
}
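A usage sketch for the new scheduling API (IDs and the cron expression are made up; createSchedule from @xen-orchestra/cron takes the expression and an optional timezone, as _start() shows):

// Run the job every day at 01:00, Paris time.
const schedule = await xo.createSchedule({
  cron: '0 1 * * *',
  enabled: true,
  jobId: 'some-job-id',
  name: 'nightly backup',
  timezone: 'Europe/Paris',
  userId: 'some-user-id',
})

// Pause it later: updateSchedule() re-invokes _start(), which stops the
// existing cron run and does not restart it while enabled is false.
await xo.updateSchedule({ id: schedule.id, enabled: false })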
14 packages/xo-server/src/xo-mixins/workers/index.js Normal file
@@ -0,0 +1,14 @@
import Worker from 'jest-worker'

export default class Workers {
  get worker () {
    return this._worker
  }

  constructor (app) {
    app.on('start', () => {
      this._worker = new Worker(require.resolve('./worker'))
    })
    app.on('stop', () => this._worker.end())
  }
}
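With jest-worker, every function exported by ./worker becomes an asynchronous method on the Worker instance, executed in a child process. A sketch, assuming a hypothetical exported helper and that other mixins can reach this one as app.workers (neither is shown in this diff):

// ./worker.js (hypothetical content)
export function heavyComputation (input) {
  // ... CPU-bound work kept off the main event loop ...
  return input
}

// Elsewhere, once the app has started:
const result = await app.workers.worker.heavyComputation(input)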
Some files were not shown because too many files have changed in this diff.